repository_name
stringclasses
316 values
func_path_in_repository
stringlengths
6
223
func_name
stringlengths
1
134
language
stringclasses
1 value
func_code_string
stringlengths
57
65.5k
func_documentation_string
stringlengths
1
46.3k
split_name
stringclasses
1 value
func_code_url
stringlengths
91
315
called_functions
listlengths
1
156
enclosing_scope
stringlengths
2
1.48M
saltstack/salt
salt/minion.py
get_proc_dir
python
def get_proc_dir(cachedir, **kwargs):
    '''
    Given the cache directory, return the directory that process data is
    stored in, creating it if it doesn't exist.

    The following optional Keyword Arguments are handled:

    mode: anything os.makedirs would accept as mode.

    uid: the uid to set, if not set, or it is None or -1 no changes are
         made. Same applies if the directory is already owned by this
         uid. Must be int. Works only on unix/unix like systems.

    gid: the gid to set, if not set, or it is None or -1 no changes are
         made. Same applies if the directory is already owned by this
         gid. Must be int. Works only on unix/unix like systems.
    '''
    fn_ = os.path.join(cachedir, 'proc')
    mode = kwargs.pop('mode', None)

    # Pack mode into a kwargs dict so os.makedirs only receives an
    # explicit mode when the caller supplied one.
    if mode is None:
        mode = {}
    else:
        mode = {'mode': mode}

    if not os.path.isdir(fn_):
        # proc_dir is not present, create it with mode settings
        os.makedirs(fn_, **mode)

    d_stat = os.stat(fn_)

    # if mode is not an empty dict then we have an explicit
    # dir mode. So lets check if mode needs to be changed.
    if mode:
        mode_part = S_IMODE(d_stat.st_mode)
        if mode_part != mode['mode']:
            # Clear the current permission bits and set the requested ones,
            # preserving the non-permission bits (file type, suid, etc.).
            os.chmod(fn_, (d_stat.st_mode ^ mode_part) | mode['mode'])

    if hasattr(os, 'chown'):
        # only on unix/unix like systems
        uid = kwargs.pop('uid', -1)
        gid = kwargs.pop('gid', -1)

        # BUGFIX: normalize None to -1 so that, as documented, passing
        # uid=None/gid=None means "no change". Without this, os.chown
        # would be invoked with None and raise TypeError, because
        # `d_stat.st_uid != None` is always true and None != -1 passes
        # the guard below.
        uid = -1 if uid is None else uid
        gid = -1 if gid is None else gid

        # if uid and gid are both -1 then go ahead with
        # no changes at all
        if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \
                [i for i in (uid, gid) if i != -1]:
            os.chown(fn_, uid, gid)

    return fn_
Given the cache directory, return the directory that process data is stored in, creating it if it doesn't exist. The following optional Keyword Arguments are handled: mode: which is anything os.makedirs would accept as mode. uid: the uid to set, if not set, or it is None or -1 no changes are made. Same applies if the directory is already owned by this uid. Must be int. Works only on unix/unix like systems. gid: the gid to set, if not set, or it is None or -1 no changes are made. Same applies if the directory is already owned by this gid. Must be int. Works only on unix/unix like systems.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L272-L320
null
# -*- coding: utf-8 -*- ''' Routines to set up a minion ''' # Import python libs from __future__ import absolute_import, print_function, with_statement, unicode_literals import functools import os import sys import copy import time import types import signal import random import logging import threading import traceback import contextlib import multiprocessing from random import randint, shuffle from stat import S_IMODE import salt.serializers.msgpack from binascii import crc32 # Import Salt Libs # pylint: disable=import-error,no-name-in-module,redefined-builtin from salt.ext import six from salt._compat import ipaddress from salt.utils.network import parse_host_port from salt.ext.six.moves import range from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO import salt.transport.client import salt.defaults.exitcodes from salt.utils.ctx import RequestContext # pylint: enable=no-name-in-module,redefined-builtin import tornado HAS_PSUTIL = False try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True except ImportError: pass HAS_RESOURCE = False try: import resource HAS_RESOURCE = True except ImportError: pass try: import zmq.utils.monitor HAS_ZMQ_MONITOR = True except ImportError: HAS_ZMQ_MONITOR = False try: import salt.utils.win_functions HAS_WIN_FUNCTIONS = True except ImportError: HAS_WIN_FUNCTIONS = False # pylint: enable=import-error # Import salt libs import salt import salt.client import salt.crypt import salt.loader import salt.beacons import salt.engines import salt.payload import salt.pillar import salt.syspaths import salt.utils.args import salt.utils.context import salt.utils.data import salt.utils.error import salt.utils.event import salt.utils.files import salt.utils.jid import salt.utils.minion import salt.utils.minions import salt.utils.network import salt.utils.platform import salt.utils.process import salt.utils.schedule import salt.utils.ssdp import salt.utils.user import salt.utils.zeromq import 
salt.defaults.events import salt.defaults.exitcodes import salt.cli.daemons import salt.log.setup import salt.utils.dictupdate from salt.config import DEFAULT_MINION_OPTS from salt.defaults import DEFAULT_TARGET_DELIM from salt.utils.debug import enable_sigusr1_handler from salt.utils.event import tagify from salt.utils.odict import OrderedDict from salt.utils.process import (default_signals, SignalHandlingMultiprocessingProcess, ProcessManager) from salt.exceptions import ( CommandExecutionError, CommandNotFoundError, SaltInvocationError, SaltReqTimeoutError, SaltClientError, SaltSystemExit, SaltDaemonNotRunning, SaltException, SaltMasterUnresolvableError ) import tornado.gen # pylint: disable=F0401 import tornado.ioloop # pylint: disable=F0401 log = logging.getLogger(__name__) # To set up a minion: # 1. Read in the configuration # 2. Generate the function mapping dict # 3. Authenticate with the master # 4. Store the AES key # 5. Connect to the publisher # 6. Handle publications def resolve_dns(opts, fallback=True): ''' Resolves the master_ip and master_uri options ''' ret = {} check_dns = True if (opts.get('file_client', 'remote') == 'local' and not opts.get('use_master_when_local', False)): check_dns = False # Since salt.log is imported below, salt.utils.network needs to be imported here as well import salt.utils.network if check_dns is True: try: if opts['master'] == '': raise SaltSystemExit ret['master_ip'] = salt.utils.network.dns_check( opts['master'], int(opts['master_port']), True, opts['ipv6'], attempt_connect=False) except SaltClientError: retry_dns_count = opts.get('retry_dns_count', None) if opts['retry_dns']: while True: if retry_dns_count is not None: if retry_dns_count == 0: raise SaltMasterUnresolvableError retry_dns_count -= 1 import salt.log msg = ('Master hostname: \'{0}\' not found or not responsive. 
' 'Retrying in {1} seconds').format(opts['master'], opts['retry_dns']) if salt.log.setup.is_console_configured(): log.error(msg) else: print('WARNING: {0}'.format(msg)) time.sleep(opts['retry_dns']) try: ret['master_ip'] = salt.utils.network.dns_check( opts['master'], int(opts['master_port']), True, opts['ipv6'], attempt_connect=False) break except SaltClientError: pass else: if fallback: ret['master_ip'] = '127.0.0.1' else: raise except SaltSystemExit: unknown_str = 'unknown address' master = opts.get('master', unknown_str) if master == '': master = unknown_str if opts.get('__role') == 'syndic': err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \ 'Set \'syndic_master\' value in minion config.'.format(master) else: err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \ 'Set \'master\' value in minion config.'.format(master) log.error(err) raise SaltSystemExit(code=42, msg=err) else: ret['master_ip'] = '127.0.0.1' if 'master_ip' in ret and 'master_ip' in opts: if ret['master_ip'] != opts['master_ip']: log.warning( 'Master ip address changed from %s to %s', opts['master_ip'], ret['master_ip'] ) if opts['source_interface_name']: log.trace('Custom source interface required: %s', opts['source_interface_name']) interfaces = salt.utils.network.interfaces() log.trace('The following interfaces are available on this Minion:') log.trace(interfaces) if opts['source_interface_name'] in interfaces: if interfaces[opts['source_interface_name']]['up']: addrs = interfaces[opts['source_interface_name']]['inet'] if not opts['ipv6'] else\ interfaces[opts['source_interface_name']]['inet6'] ret['source_ip'] = addrs[0]['address'] log.debug('Using %s as source IP address', ret['source_ip']) else: log.warning('The interface %s is down so it cannot be used as source to connect to the Master', opts['source_interface_name']) else: log.warning('%s is not a valid interface. 
Ignoring.', opts['source_interface_name']) elif opts['source_address']: ret['source_ip'] = salt.utils.network.dns_check( opts['source_address'], int(opts['source_ret_port']), True, opts['ipv6'], attempt_connect=False) log.debug('Using %s as source IP address', ret['source_ip']) if opts['source_ret_port']: ret['source_ret_port'] = int(opts['source_ret_port']) log.debug('Using %d as source port for the ret server', ret['source_ret_port']) if opts['source_publish_port']: ret['source_publish_port'] = int(opts['source_publish_port']) log.debug('Using %d as source port for the master pub', ret['source_publish_port']) ret['master_uri'] = 'tcp://{ip}:{port}'.format( ip=ret['master_ip'], port=opts['master_port']) log.debug('Master URI: %s', ret['master_uri']) return ret def prep_ip_port(opts): ''' parse host:port values from opts['master'] and return valid: master: ip address or hostname as a string master_port: (optional) master returner port as integer e.g.: - master: 'localhost:1234' -> {'master': 'localhost', 'master_port': 1234} - master: '127.0.0.1:1234' -> {'master': '127.0.0.1', 'master_port' :1234} - master: '[::1]:1234' -> {'master': '::1', 'master_port': 1234} - master: 'fe80::a00:27ff:fedc:ba98' -> {'master': 'fe80::a00:27ff:fedc:ba98'} ''' ret = {} # Use given master IP if "ip_only" is set or if master_ip is an ipv6 address without # a port specified. The is_ipv6 check returns False if brackets are used in the IP # definition such as master: '[::1]:1234'. if opts['master_uri_format'] == 'ip_only': ret['master'] = ipaddress.ip_address(opts['master']) else: host, port = parse_host_port(opts['master']) ret = {'master': host} if port: ret.update({'master_port': port}) return ret def load_args_and_kwargs(func, args, data=None, ignore_invalid=False): ''' Detect the args and kwargs that need to be passed to a function call, and check them against what was passed. 
''' argspec = salt.utils.args.get_function_argspec(func) _args = [] _kwargs = {} invalid_kwargs = [] for arg in args: if isinstance(arg, dict) and arg.pop('__kwarg__', False) is True: # if the arg is a dict with __kwarg__ == True, then its a kwarg for key, val in six.iteritems(arg): if argspec.keywords or key in argspec.args: # Function supports **kwargs or is a positional argument to # the function. _kwargs[key] = val else: # **kwargs not in argspec and parsed argument name not in # list of positional arguments. This keyword argument is # invalid. invalid_kwargs.append('{0}={1}'.format(key, val)) continue else: string_kwarg = salt.utils.args.parse_input([arg], condition=False)[1] # pylint: disable=W0632 if string_kwarg: if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args: # Function supports **kwargs or is a positional argument to # the function. _kwargs.update(string_kwarg) else: # **kwargs not in argspec and parsed argument name not in # list of positional arguments. This keyword argument is # invalid. 
for key, val in six.iteritems(string_kwarg): invalid_kwargs.append('{0}={1}'.format(key, val)) else: _args.append(arg) if invalid_kwargs and not ignore_invalid: salt.utils.args.invalid_kwargs(invalid_kwargs) if argspec.keywords and isinstance(data, dict): # this function accepts **kwargs, pack in the publish data for key, val in six.iteritems(data): _kwargs['__pub_{0}'.format(key)] = val return _args, _kwargs def eval_master_func(opts): ''' Evaluate master function if master type is 'func' and save it result in opts['master'] ''' if '__master_func_evaluated' not in opts: # split module and function and try loading the module mod_fun = opts['master'] mod, fun = mod_fun.split('.') try: master_mod = salt.loader.raw_mod(opts, mod, fun) if not master_mod: raise KeyError # we take whatever the module returns as master address opts['master'] = master_mod[mod_fun]() # Check for valid types if not isinstance(opts['master'], (six.string_types, list)): raise TypeError opts['__master_func_evaluated'] = True except KeyError: log.error('Failed to load module %s', mod_fun) sys.exit(salt.defaults.exitcodes.EX_GENERIC) except TypeError: log.error('%s returned from %s is not a string', opts['master'], mod_fun) sys.exit(salt.defaults.exitcodes.EX_GENERIC) log.info('Evaluated master from module: %s', mod_fun) def master_event(type, master=None): ''' Centralized master event function which will return event type based on event_map ''' event_map = {'connected': '__master_connected', 'disconnected': '__master_disconnected', 'failback': '__master_failback', 'alive': '__master_alive'} if type == 'alive' and master is not None: return '{0}_{1}'.format(event_map.get(type), master) return event_map.get(type, None) def service_name(): ''' Return the proper service name based on platform ''' return 'salt_minion' if 'bsd' in sys.platform else 'salt-minion' class MinionBase(object): def __init__(self, opts): self.opts = opts @staticmethod def process_schedule(minion, loop_interval): try: if 
hasattr(minion, 'schedule'): minion.schedule.eval() else: log.error('Minion scheduler not initialized. Scheduled jobs will not be run.') return # Check if scheduler requires lower loop interval than # the loop_interval setting if minion.schedule.loop_interval < loop_interval: loop_interval = minion.schedule.loop_interval log.debug( 'Overriding loop_interval because of scheduled jobs.' ) except Exception as exc: log.error('Exception %s occurred in scheduled job', exc) return loop_interval def process_beacons(self, functions): ''' Evaluate all of the configured beacons, grab the config again in case the pillar or grains changed ''' if 'config.merge' in functions: b_conf = functions['config.merge']('beacons', self.opts['beacons'], omit_opts=True) if b_conf: return self.beacons.process(b_conf, self.opts['grains']) # pylint: disable=no-member return [] @tornado.gen.coroutine def eval_master(self, opts, timeout=60, safe=True, failed=False, failback=False): ''' Evaluates and returns a tuple of the current master address and the pub_channel. In standard mode, just creates a pub_channel with the given master address. With master_type=func evaluates the current master address from the given module and then creates a pub_channel. With master_type=failover takes the list of masters and loops through them. The first one that allows the minion to create a pub_channel is then returned. If this function is called outside the minions initialization phase (for example from the minions main event-loop when a master connection loss was detected), 'failed' should be set to True. The current (possibly failed) master will then be removed from the list of masters. ''' # return early if we are not connecting to a master if opts['master_type'] == 'disable': log.warning('Master is set to disable, skipping connection') self.connected = False raise tornado.gen.Return((None, None)) # Run masters discovery over SSDP. 
This may modify the whole configuration, # depending of the networking and sets of masters. # if we are using multimaster, discovery can only happen at start time # because MinionManager handles it. by eval_master time the minion doesn't # know about other siblings currently running if isinstance(self.opts['discovery'], dict) and not self.opts['discovery'].get('multimaster'): self._discover_masters() # check if master_type was altered from its default if opts['master_type'] != 'str' and opts['__role'] != 'syndic': # check for a valid keyword if opts['master_type'] == 'func': eval_master_func(opts) # if failover or distributed is set, master has to be of type list elif opts['master_type'] in ('failover', 'distributed'): if isinstance(opts['master'], list): log.info( 'Got list of available master addresses: %s', opts['master'] ) if opts['master_type'] == 'distributed': master_len = len(opts['master']) if master_len > 1: secondary_masters = opts['master'][1:] master_idx = crc32(opts['id']) % master_len try: preferred_masters = opts['master'] preferred_masters[0] = opts['master'][master_idx] preferred_masters[1:] = [m for m in opts['master'] if m != preferred_masters[0]] opts['master'] = preferred_masters log.info('Distributed to the master at \'%s\'.', opts['master'][0]) except (KeyError, AttributeError, TypeError): log.warning('Failed to distribute to a specific master.') else: log.warning('master_type = distributed needs more than 1 master.') if opts['master_shuffle']: log.warning( 'Use of \'master_shuffle\' detected. \'master_shuffle\' is deprecated in favor ' 'of \'random_master\'. Please update your minion config file.' 
) opts['random_master'] = opts['master_shuffle'] opts['auth_tries'] = 0 if opts['master_failback'] and opts['master_failback_interval'] == 0: opts['master_failback_interval'] = opts['master_alive_interval'] # if opts['master'] is a str and we have never created opts['master_list'] elif isinstance(opts['master'], six.string_types) and ('master_list' not in opts): # We have a string, but a list was what was intended. Convert. # See issue 23611 for details opts['master'] = [opts['master']] elif opts['__role'] == 'syndic': log.info('Syndic setting master_syndic to \'%s\'', opts['master']) # if failed=True, the minion was previously connected # we're probably called from the minions main-event-loop # because a master connection loss was detected. remove # the possibly failed master from the list of masters. elif failed: if failback: # failback list of masters to original config opts['master'] = opts['master_list'] else: log.info( 'Moving possibly failed master %s to the end of ' 'the list of masters', opts['master'] ) if opts['master'] in opts['local_masters']: # create new list of master with the possibly failed # one moved to the end failed_master = opts['master'] opts['master'] = [x for x in opts['local_masters'] if opts['master'] != x] opts['master'].append(failed_master) else: opts['master'] = opts['master_list'] else: msg = ('master_type set to \'failover\' but \'master\' ' 'is not of type list but of type ' '{0}'.format(type(opts['master']))) log.error(msg) sys.exit(salt.defaults.exitcodes.EX_GENERIC) # If failover is set, minion have to failover on DNS errors instead of retry DNS resolve. # See issue 21082 for details if opts['retry_dns'] and opts['master_type'] == 'failover': msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. 
' 'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.') log.critical(msg) opts['retry_dns'] = 0 else: msg = ('Invalid keyword \'{0}\' for variable ' '\'master_type\''.format(opts['master_type'])) log.error(msg) sys.exit(salt.defaults.exitcodes.EX_GENERIC) # FIXME: if SMinion don't define io_loop, it can't switch master see #29088 # Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop # (The channel factories will set a default if the kwarg isn't passed) factory_kwargs = {'timeout': timeout, 'safe': safe} if getattr(self, 'io_loop', None): factory_kwargs['io_loop'] = self.io_loop # pylint: disable=no-member tries = opts.get('master_tries', 1) attempts = 0 # if we have a list of masters, loop through them and be # happy with the first one that allows us to connect if isinstance(opts['master'], list): conn = False last_exc = None opts['master_uri_list'] = [] opts['local_masters'] = copy.copy(opts['master']) # shuffle the masters and then loop through them if opts['random_master']: # master_failback is only used when master_type is set to failover if opts['master_type'] == 'failover' and opts['master_failback']: secondary_masters = opts['local_masters'][1:] shuffle(secondary_masters) opts['local_masters'][1:] = secondary_masters else: shuffle(opts['local_masters']) # This sits outside of the connection loop below because it needs to set # up a list of master URIs regardless of which masters are available # to connect _to_. This is primarily used for masterless mode, when # we need a list of master URIs to fire calls back to. for master in opts['local_masters']: opts['master'] = master opts.update(prep_ip_port(opts)) opts['master_uri_list'].append(resolve_dns(opts)['master_uri']) while True: if attempts != 0: # Give up a little time between connection attempts # to allow the IOLoop to run any other scheduled tasks. 
yield tornado.gen.sleep(opts['acceptance_wait_time']) attempts += 1 if tries > 0: log.debug( 'Connecting to master. Attempt %s of %s', attempts, tries ) else: log.debug( 'Connecting to master. Attempt %s (infinite attempts)', attempts ) for master in opts['local_masters']: opts['master'] = master opts.update(prep_ip_port(opts)) opts.update(resolve_dns(opts)) # on first run, update self.opts with the whole master list # to enable a minion to re-use old masters if they get fixed if 'master_list' not in opts: opts['master_list'] = copy.copy(opts['local_masters']) self.opts = opts pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs) try: yield pub_channel.connect() conn = True break except SaltClientError as exc: last_exc = exc if exc.strerror.startswith('Could not access'): msg = ( 'Failed to initiate connection with Master ' '%s: check ownership/permissions. Error ' 'message: %s', opts['master'], exc ) else: msg = ('Master %s could not be reached, trying next ' 'next master (if any)', opts['master']) log.info(msg) continue if not conn: if attempts == tries: # Exhausted all attempts. Return exception. self.connected = False self.opts['master'] = copy.copy(self.opts['local_masters']) log.error( 'No master could be reached or all masters ' 'denied the minion\'s connection attempt.' ) # If the code reaches this point, 'last_exc' # should already be set. raise last_exc # pylint: disable=E0702 else: self.tok = pub_channel.auth.gen_token(b'salt') self.connected = True raise tornado.gen.Return((opts['master'], pub_channel)) # single master sign in else: if opts['random_master']: log.warning('random_master is True but there is only one master specified. Ignoring.') while True: if attempts != 0: # Give up a little time between connection attempts # to allow the IOLoop to run any other scheduled tasks. yield tornado.gen.sleep(opts['acceptance_wait_time']) attempts += 1 if tries > 0: log.debug( 'Connecting to master. 
Attempt %s of %s', attempts, tries ) else: log.debug( 'Connecting to master. Attempt %s (infinite attempts)', attempts ) opts.update(prep_ip_port(opts)) opts.update(resolve_dns(opts)) try: if self.opts['transport'] == 'detect': self.opts['detect_mode'] = True for trans in ('zeromq', 'tcp'): if trans == 'zeromq' and not zmq: continue self.opts['transport'] = trans pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs) yield pub_channel.connect() if not pub_channel.auth.authenticated: continue del self.opts['detect_mode'] break else: pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs) yield pub_channel.connect() self.tok = pub_channel.auth.gen_token(b'salt') self.connected = True raise tornado.gen.Return((opts['master'], pub_channel)) except SaltClientError as exc: if attempts == tries: # Exhausted all attempts. Return exception. self.connected = False raise exc def _discover_masters(self): ''' Discover master(s) and decide where to connect, if SSDP is around. This modifies the configuration on the fly. :return: ''' if self.opts['master'] == DEFAULT_MINION_OPTS['master'] and self.opts['discovery'] is not False: master_discovery_client = salt.utils.ssdp.SSDPDiscoveryClient() masters = {} for att in range(self.opts['discovery'].get('attempts', 3)): try: att += 1 log.info('Attempting %s time(s) to discover masters', att) masters.update(master_discovery_client.discover()) if not masters: time.sleep(self.opts['discovery'].get('pause', 5)) else: break except Exception as err: log.error('SSDP discovery failure: %s', err) break if masters: policy = self.opts.get('discovery', {}).get('match', 'any') if policy not in ['any', 'all']: log.error('SSDP configuration matcher failure: unknown value "%s". 
' 'Should be "any" or "all"', policy) return mapping = self.opts['discovery'].get('mapping', {}) discovered = [] for addr, mappings in masters.items(): for proto_data in mappings: cnt = len([key for key, value in mapping.items() if proto_data.get('mapping', {}).get(key) == value]) if policy == 'any' and bool(cnt) or cnt == len(mapping): if self.opts['discovery'].get('multimaster'): discovered.append(proto_data['master']) else: self.opts['master'] = proto_data['master'] return self.opts['master'] = discovered def _return_retry_timer(self): ''' Based on the minion configuration, either return a randomized timer or just return the value of the return_retry_timer. ''' msg = 'Minion return retry timer set to %s seconds' if self.opts.get('return_retry_timer_max'): try: random_retry = randint(self.opts['return_retry_timer'], self.opts['return_retry_timer_max']) retry_msg = msg % random_retry log.debug('%s (randomized)', msg % random_retry) return random_retry except ValueError: # Catch wiseguys using negative integers here log.error( 'Invalid value (return_retry_timer: %s or ' 'return_retry_timer_max: %s). Both must be positive ' 'integers.', self.opts['return_retry_timer'], self.opts['return_retry_timer_max'], ) log.debug(msg, DEFAULT_MINION_OPTS['return_retry_timer']) return DEFAULT_MINION_OPTS['return_retry_timer'] else: log.debug(msg, self.opts.get('return_retry_timer')) return self.opts.get('return_retry_timer') class SMinion(MinionBase): ''' Create an object that has loaded all of the minion module functions, grains, modules, returners etc. The SMinion allows developers to generate all of the salt minion functions and present them with these functions for general use. 
''' def __init__(self, opts): # Late setup of the opts grains, so we can log from the grains module import salt.loader opts['grains'] = salt.loader.grains(opts) super(SMinion, self).__init__(opts) # run ssdp discovery if necessary self._discover_masters() # Clean out the proc directory (default /var/cache/salt/minion/proc) if (self.opts.get('file_client', 'remote') == 'remote' or self.opts.get('use_master_when_local', False)): install_zmq() io_loop = ZMQDefaultLoop.current() io_loop.run_sync( lambda: self.eval_master(self.opts, failed=True) ) self.gen_modules(initial_load=True) # If configured, cache pillar data on the minion if self.opts['file_client'] == 'remote' and self.opts.get('minion_pillar_cache', False): import salt.utils.yaml pdir = os.path.join(self.opts['cachedir'], 'pillar') if not os.path.isdir(pdir): os.makedirs(pdir, 0o700) ptop = os.path.join(pdir, 'top.sls') if self.opts['saltenv'] is not None: penv = self.opts['saltenv'] else: penv = 'base' cache_top = {penv: {self.opts['id']: ['cache']}} with salt.utils.files.fopen(ptop, 'wb') as fp_: salt.utils.yaml.safe_dump(cache_top, fp_) os.chmod(ptop, 0o600) cache_sls = os.path.join(pdir, 'cache.sls') with salt.utils.files.fopen(cache_sls, 'wb') as fp_: salt.utils.yaml.safe_dump(self.opts['pillar'], fp_) os.chmod(cache_sls, 0o600) def gen_modules(self, initial_load=False): ''' Tell the minion to reload the execution modules CLI Example: .. 
code-block:: bash salt '*' sys.reload_modules ''' self.opts['pillar'] = salt.pillar.get_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ).compile_pillar() self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils) self.serializers = salt.loader.serializers(self.opts) self.returners = salt.loader.returners(self.opts, self.functions) self.proxy = salt.loader.proxy(self.opts, self.functions, self.returners, None) # TODO: remove self.function_errors = {} # Keep the funcs clean self.states = salt.loader.states(self.opts, self.functions, self.utils, self.serializers) self.rend = salt.loader.render(self.opts, self.functions) # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.functions['sys.reload_modules'] = self.gen_modules self.executors = salt.loader.executors(self.opts) class MasterMinion(object): ''' Create a fully loaded minion function object for generic use on the master. What makes this class different is that the pillar is omitted, otherwise everything else is loaded cleanly. ''' def __init__( self, opts, returners=True, states=True, rend=True, matcher=True, whitelist=None, ignore_config_errors=True): self.opts = salt.config.minion_config( opts['conf_file'], ignore_config_errors=ignore_config_errors, role='master' ) self.opts.update(opts) self.whitelist = whitelist self.opts['grains'] = salt.loader.grains(opts) self.opts['pillar'] = {} self.mk_returners = returners self.mk_states = states self.mk_rend = rend self.mk_matcher = matcher self.gen_modules(initial_load=True) def gen_modules(self, initial_load=False): ''' Tell the minion to reload the execution modules CLI Example: .. 
code-block:: bash salt '*' sys.reload_modules ''' self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods( self.opts, utils=self.utils, whitelist=self.whitelist, initial_load=initial_load) self.serializers = salt.loader.serializers(self.opts) if self.mk_returners: self.returners = salt.loader.returners(self.opts, self.functions) if self.mk_states: self.states = salt.loader.states(self.opts, self.functions, self.utils, self.serializers) if self.mk_rend: self.rend = salt.loader.render(self.opts, self.functions) if self.mk_matcher: self.matchers = salt.loader.matchers(self.opts) self.functions['sys.reload_modules'] = self.gen_modules class MinionManager(MinionBase): ''' Create a multi minion interface, this creates as many minions as are defined in the master option and binds each minion object to a respective master. ''' def __init__(self, opts): super(MinionManager, self).__init__(opts) self.auth_wait = self.opts['acceptance_wait_time'] self.max_auth_wait = self.opts['acceptance_wait_time_max'] self.minions = [] self.jid_queue = [] install_zmq() self.io_loop = ZMQDefaultLoop.current() self.process_manager = ProcessManager(name='MultiMinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # Tornado backward compat def __del__(self): self.destroy() def _bind(self): # start up the event publisher, so we can see events during startup self.event_publisher = salt.utils.event.AsyncEventPublisher( self.opts, io_loop=self.io_loop, ) self.event = salt.utils.event.get_event('minion', opts=self.opts, io_loop=self.io_loop) self.event.subscribe('') self.event.set_event_handler(self.handle_event) @tornado.gen.coroutine def handle_event(self, package): yield [minion.handle_event(package) for minion in self.minions] def _create_minion_object(self, opts, timeout, safe, io_loop=None, loaded_base_name=None, jid_queue=None): ''' Helper function to return the correct type of object ''' return Minion(opts, timeout, 
safe, io_loop=io_loop, loaded_base_name=loaded_base_name, jid_queue=jid_queue)

    def _check_minions(self):
        '''
        Check the size of self.minions and log an error if it's empty.

        NOTE(review): despite the wording of the original docstring, no
        exception is raised here -- the failure is only logged.
        '''
        if not self.minions:
            err = ('Minion unable to successfully connect to '
                   'a Salt Master.')
            log.error(err)

    def _spawn_minions(self, timeout=60):
        '''
        Spawn all the coroutines which will sign in to masters

        :param timeout: seconds before _check_minions verifies that at least
                        one master connection succeeded
        '''
        # Run masters discovery over SSDP. This may modify the whole configuration,
        # depending of the networking and sets of masters. If match is 'any' we let
        # eval_master handle the discovery instead so disconnections can also handle
        # discovery
        if isinstance(self.opts['discovery'], dict) and self.opts['discovery'].get('multimaster'):
            self._discover_masters()

        masters = self.opts['master']
        # failover/distributed modes try masters one at a time, so a scalar or
        # failover master list is wrapped into a single-entry list here
        if (self.opts['master_type'] in ('failover', 'distributed')) or not isinstance(self.opts['master'], list):
            masters = [masters]

        for master in masters:
            # each sub-minion gets its own deep-copied opts pointing at one master
            s_opts = copy.deepcopy(self.opts)
            s_opts['master'] = master
            s_opts['multimaster'] = True
            minion = self._create_minion_object(s_opts,
                                                s_opts['auth_timeout'],
                                                False,
                                                io_loop=self.io_loop,
                                                loaded_base_name='salt.loader.{0}'.format(s_opts['master']),
                                                jid_queue=self.jid_queue)
            self.io_loop.spawn_callback(self._connect_minion, minion)
        self.io_loop.call_later(timeout, self._check_minions)

    @tornado.gen.coroutine
    def _connect_minion(self, minion):
        '''
        Create a minion, and asynchronously connect it to a master

        Retries forever on SaltClientError with a back-off capped at
        max_auth_wait; gives up immediately on an unresolvable master address.
        '''
        auth_wait = minion.opts['acceptance_wait_time']
        failed = False
        while True:
            if failed:
                if auth_wait < self.max_auth_wait:
                    auth_wait += self.auth_wait
                log.debug(
                    "sleeping before reconnect attempt to %s [%d/%d]",
                    minion.opts['master'],
                    auth_wait,
                    self.max_auth_wait,
                )
                yield tornado.gen.sleep(auth_wait)  # TODO: log?
            try:
                if minion.opts.get('beacons_before_connect', False):
                    minion.setup_beacons(before_connect=True)
                if minion.opts.get('scheduler_before_connect', False):
                    minion.setup_scheduler(before_connect=True)
                yield minion.connect_master(failed=failed)
                minion.tune_in(start=False)
                self.minions.append(minion)
                break
            except SaltClientError as exc:
                # transient failure: loop around and retry with back-off
                failed = True
                log.error(
                    'Error while bringing up minion for multi-master. Is '
                    'master at %s responding?', minion.opts['master']
                )
            except SaltMasterUnresolvableError:
                # permanent failure: bad address, do not retry
                err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \
                      'Set \'master\' value in minion config.'.format(minion.opts['master'])
                log.error(err)
                break
            except Exception as e:
                failed = True
                log.critical(
                    'Unexpected error while connecting to %s',
                    minion.opts['master'], exc_info=True
                )

    # Multi Master Tune In
    def tune_in(self):
        '''
        Bind to the masters

        This loop will attempt to create connections to masters it hasn't connected
        to yet, but once the initial connection is made it is up to ZMQ to do the
        reconnect (don't know of an API to get the state here in salt)
        '''
        self._bind()

        # Fire off all the minion coroutines
        self._spawn_minions()

        # serve forever!
        self.io_loop.start()

    @property
    def restart(self):
        # True if any managed sub-minion has requested a restart
        for minion in self.minions:
            if minion.restart:
                return True
        return False

    def stop(self, signum):
        # forward the signal to every sub-minion's process manager and tear down
        for minion in self.minions:
            minion.process_manager.stop_restarting()
            minion.process_manager.send_signal_to_processes(signum)
            # kill any remaining processes
            minion.process_manager.kill_children()
            minion.destroy()

    def destroy(self):
        for minion in self.minions:
            minion.destroy()


class Minion(MinionBase):
    '''
    This class instantiates a minion, runs connections for a minion,
    and loads all of the functions into the minion
    '''
    def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None):  # pylint: disable=W0231
        '''
        Pass in the options dict

        :param opts: minion configuration dict
        :param timeout: master auth timeout passed to eval_master
        :param safe: whether auth errors are tolerated during connect
        :param loaded_base_name: namespace prefix for this minion's loader
        :param io_loop: optional existing IO loop; a ZMQ loop is created if None
        :param jid_queue: shared list of recently-seen JIDs (deduplication)
        '''
        # this means that the parent class doesn't know *which* master we connect to
        super(Minion, self).__init__(opts)
        self.timeout = timeout
        self.safe = safe

        self._running = None
        self.win_proc = []
        self.loaded_base_name = loaded_base_name
        self.connected = False
        self.restart = False
        # Flag meaning minion has finished initialization including first connect to the master.
        # True means the Minion is fully functional and ready to handle events.
        self.ready = False
        self.jid_queue = [] if jid_queue is None else jid_queue
        self.periodic_callbacks = {}

        if io_loop is None:
            install_zmq()
            self.io_loop = ZMQDefaultLoop.current()
        else:
            self.io_loop = io_loop

        # Warn if ZMQ < 3.2
        if zmq:
            if ZMQ_VERSION_INFO < (3, 2):
                log.warning(
                    'You have a version of ZMQ less than ZMQ 3.2! There are '
                    'known connection keep-alive issues with ZMQ < 3.2 which '
                    'may result in loss of contact with minions. Please '
                    'upgrade your ZMQ!'
                )
        # Late setup of the opts grains, so we can log from the grains
        # module.  If this is a proxy, however, we need to init the proxymodule
        # before we can get the grains.  We do this for proxies in the
        # post_master_init
        if not salt.utils.platform.is_proxy():
            self.opts['grains'] = salt.loader.grains(opts)
        else:
            # proxy minions cannot run beacons/scheduler before the proxymodule
            # exists, so those options are forced off here
            if self.opts.get('beacons_before_connect', False):
                log.warning(
                    '\'beacons_before_connect\' is not supported '
                    'for proxy minions. Setting to False'
                )
                self.opts['beacons_before_connect'] = False
            if self.opts.get('scheduler_before_connect', False):
                log.warning(
                    '\'scheduler_before_connect\' is not supported '
                    'for proxy minions. Setting to False'
                )
                self.opts['scheduler_before_connect'] = False

        log.info('Creating minion process manager')

        if self.opts['random_startup_delay']:
            # jitter start-up to avoid thundering-herd re-auth against the master
            sleep_time = random.randint(0, self.opts['random_startup_delay'])
            log.info(
                'Minion sleeping for %s seconds due to configured '
                'startup_delay between 0 and %s seconds',
                sleep_time, self.opts['random_startup_delay']
            )
            time.sleep(sleep_time)

        self.process_manager = ProcessManager(name='MinionProcessManager')
        self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True})
        # We don't have the proxy setup yet, so we can't start engines
        # Engines need to be able to access __proxy__
        if not salt.utils.platform.is_proxy():
            self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
                                        self.process_manager)

        # Install the SIGINT/SIGTERM handlers if not done so far
        if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
            # No custom signal handling was added, install our own
            signal.signal(signal.SIGINT, self._handle_signals)

        if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
            # No custom signal handling was added, install our own
            signal.signal(signal.SIGTERM, self._handle_signals)

    def _handle_signals(self, signum, sigframe):  # pylint: disable=unused-argument
        self._running = False
        # escalate the signals to the process manager
        self.process_manager.stop_restarting()
        self.process_manager.send_signal_to_processes(signum)
        # kill any remaining processes
        self.process_manager.kill_children()
        time.sleep(1)
        sys.exit(0)

    def sync_connect_master(self,
timeout=None, failed=False):
        '''
        Block until we are connected to a master

        Runs the IO loop until connect_master's future resolves (or the
        optional timeout stops the loop first).
        '''
        self._sync_connect_master_success = False
        log.debug("sync_connect_master")

        def on_connect_master_future_done(future):
            self._sync_connect_master_success = True
            self.io_loop.stop()

        self._connect_master_future = self.connect_master(failed=failed)
        # finish connecting to master
        self._connect_master_future.add_done_callback(on_connect_master_future_done)
        if timeout:
            self.io_loop.call_later(timeout, self.io_loop.stop)
        try:
            self.io_loop.start()
        except KeyboardInterrupt:
            self.destroy()
        # I made the following 3 line oddity to preserve traceback.
        # Please read PR #23978 before changing, hopefully avoiding regressions.
        # Good luck, we're all counting on you.  Thanks.
        if self._connect_master_future.done():
            future_exception = self._connect_master_future.exception()
            if future_exception:
                # This needs to be re-raised to preserve restart_on_error behavior.
                raise six.reraise(*future_exception)
        if timeout and self._sync_connect_master_success is False:
            raise SaltDaemonNotRunning('Failed to connect to the salt-master')

    @tornado.gen.coroutine
    def connect_master(self, failed=False):
        '''
        Return a future which will complete when you are connected to a master
        '''
        master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed)
        yield self._post_master_init(master)

    # TODO: better name...
    @tornado.gen.coroutine
    def _post_master_init(self, master):
        '''
        Function to finish init after connecting to a master

        This is primarily loading modules, pillars, etc. (since they need
        to know which master they connected to)

        If this function is changed, please check ProxyMinion._post_master_init
        to see if those changes need to be propagated.

        Minions and ProxyMinions need significantly different post master setups,
        which is why the differences are not factored out into separate helper
        functions.
        '''
        if self.connected:
            self.opts['master'] = master

            # Initialize pillar before loader to make pillar accessible in modules
            async_pillar = salt.pillar.get_async_pillar(
                self.opts,
                self.opts['grains'],
                self.opts['id'],
                self.opts['saltenv'],
                pillarenv=self.opts.get('pillarenv')
            )
            self.opts['pillar'] = yield async_pillar.compile_pillar()
            async_pillar.destroy()

        if not self.ready:
            self._setup_core()
        elif self.connected and self.opts['pillar']:
            # The pillar has changed due to the connection to the master.
            # Reload the functions so that they can use the new pillar data.
            self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
            if hasattr(self, 'schedule'):
                self.schedule.functions = self.functions
                self.schedule.returners = self.returners

        if not hasattr(self, 'schedule'):
            self.schedule = salt.utils.schedule.Schedule(
                self.opts,
                self.functions,
                self.returners,
                cleanup=[master_event(type='alive')])

        # add default scheduling jobs to the minions scheduler
        if self.opts['mine_enabled'] and 'mine.update' in self.functions:
            self.schedule.add_job({
                '__mine_interval':
                {
                    'function': 'mine.update',
                    'minutes': self.opts['mine_interval'],
                    'jid_include': True,
                    'maxrunning': 2,
                    'run_on_start': True,
                    'return_job': self.opts.get('mine_return_job', False)
                }
            }, persist=True)
            log.info('Added mine.update to scheduler')
        else:
            self.schedule.delete_job('__mine_interval', persist=True)

        # add master_alive job if enabled
        # (tcp transport has its own keep-alive, so the ping job is skipped there)
        if (self.opts['transport'] != 'tcp' and
                self.opts['master_alive_interval'] > 0 and
                self.connected):
            self.schedule.add_job({
                master_event(type='alive', master=self.opts['master']):
                {
                    'function': 'status.master',
                    'seconds': self.opts['master_alive_interval'],
                    'jid_include': True,
                    'maxrunning': 1,
                    'return_job': False,
                    'kwargs': {'master': self.opts['master'],
                               'connected': True}
                }
            }, persist=True)
            # failback ping only runs when we are NOT on the primary master
            if self.opts['master_failback'] and \
                    'master_list' in self.opts and \
                    self.opts['master'] != self.opts['master_list'][0]:
                self.schedule.add_job({
salt.loader.utils(opts, proxy=proxy)

        if opts.get('multimaster', False):
            # isolate each master's loader namespace with a deep-copied opts
            s_opts = copy.deepcopy(opts)
            functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy,
                                                loaded_base_name=self.loaded_base_name, notify=notify)
        else:
            functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy)
        returners = salt.loader.returners(opts, functions, proxy=proxy)
        errors = {}
        if '_errors' in functions:
            errors = functions['_errors']
            functions.pop('_errors')

        # we're done, reset the limits!
        if modules_max_memory is True:
            resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)

        executors = salt.loader.executors(opts, functions, proxy=proxy)

        if opt_in:
            self.opts = opts

        return functions, returners, errors, executors

    def _send_req_sync(self, load, timeout):
        # Synchronously send ``load`` to the master over a request channel.
        if self.opts['minion_sign_messages']:
            log.trace('Signing event to be published onto the bus.')
            minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
            sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
            load['sig'] = sig

        channel = salt.transport.client.ReqChannel.factory(self.opts)
        try:
            return channel.send(load, timeout=timeout)
        finally:
            channel.close()

    @tornado.gen.coroutine
    def _send_req_async(self, load, timeout):
        # Async counterpart of _send_req_sync; resolves with the master's reply.
        if self.opts['minion_sign_messages']:
            log.trace('Signing event to be published onto the bus.')
            minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
            sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
            load['sig'] = sig

        channel = salt.transport.client.AsyncReqChannel.factory(self.opts)
        try:
            ret = yield channel.send(load, timeout=timeout)
            raise tornado.gen.Return(ret)
        finally:
            channel.close()

    def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True,
                     timeout_handler=None):
        '''
        Fire an event on the master, or drop message if unable to send.

        :param events: list of pre-built events; takes precedence over data/tag
        :param sync: when True, send synchronously and return False on failure;
                     when False, fire asynchronously under an exception context
        '''
        load = {'id': self.opts['id'],
                'cmd': '_minion_event',
                'pretag': pretag,
                'tok': self.tok}
        if events:
            load['events'] = events
        elif data and tag:
            load['data'] = data
            load['tag'] = tag
        elif not data and tag:
            load['data'] = {}
            load['tag'] = tag
        else:
            # nothing usable to send
            return

        if sync:
            try:
                self._send_req_sync(load, timeout)
            except salt.exceptions.SaltReqTimeoutError:
                log.info('fire_master failed: master could not be contacted. Request timed out.')
                # very likely one of the masters is dead, status.master will flush it
                self.functions['status.master'](self.opts['master'])
                return False
            except Exception:
                log.info('fire_master failed: %s', traceback.format_exc())
                return False
        else:
            if timeout_handler is None:
                def handle_timeout(*_):
                    log.info('fire_master failed: master could not be contacted. Request timed out.')
                    # very likely one of the masters is dead, status.master will flush it
                    self.functions['status.master'](self.opts['master'])
                    return True
                timeout_handler = handle_timeout

            with tornado.stack_context.ExceptionStackContext(timeout_handler):
                self._send_req_async(load, timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg
        return True

    @tornado.gen.coroutine
    def _handle_decoded_payload(self, data):
        '''
        Override this method if you wish to handle the decoded data
        differently.
        '''
        # Ensure payload is unicode. Disregard failure to decode binary blobs.
        if six.PY2:
            data = salt.utils.data.decode(data, keep=True)
        if 'user' in data:
            log.info(
                'User %s Executing command %s with jid %s',
                data['user'], data['fun'], data['jid']
            )
        else:
            log.info(
                'Executing command %s with jid %s',
                data['fun'], data['jid']
            )
        log.debug('Command details %s', data)

        # Don't duplicate jobs
        log.trace('Started JIDs: %s', self.jid_queue)
        if self.jid_queue is not None:
            if data['jid'] in self.jid_queue:
                return
            else:
                self.jid_queue.append(data['jid'])
                if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']:
                    self.jid_queue.pop(0)

        if isinstance(data['fun'], six.string_types):
            if data['fun'] == 'sys.reload_modules':
                self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
                self.schedule.functions = self.functions
                self.schedule.returners = self.returners

        process_count_max = self.opts.get('process_count_max')
        process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs')
        if process_count_max > 0:
            # throttle job spawning until running job count drops below the cap
            process_count = len(salt.utils.minion.running(self.opts))
            while process_count >= process_count_max:
                log.warning('Maximum number of processes (%s) reached while '
                            'executing jid %s, waiting %s seconds...',
                            process_count_max, data['jid'], process_count_max_sleep_secs)
                yield tornado.gen.sleep(process_count_max_sleep_secs)
                process_count = len(salt.utils.minion.running(self.opts))

        # We stash an instance references to allow for the socket
        # communication in Windows. You can't pickle functions, and thus
        # python needs to be able to reconstruct the reference on the other
        # side.
instance = self multiprocessing_enabled = self.opts.get('multiprocessing', True) if multiprocessing_enabled: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None with default_signals(signal.SIGINT, signal.SIGTERM): process = SignalHandlingMultiprocessingProcess( target=self._target, args=(instance, self.opts, data, self.connected) ) else: process = threading.Thread( target=self._target, args=(instance, self.opts, data, self.connected), name=data['jid'] ) if multiprocessing_enabled: with default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers process.start() else: process.start() # TODO: remove the windows specific check? if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not 
hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or 
allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
                    try:
                        # treat a dict return with result/success == False as failure
                        func_result = all(return_data.get(x, True)
                                          for x in ('result', 'success'))
                    except Exception:
                        # return data is not a dict
                        func_result = True
                    if not func_result:
                        retcode = salt.defaults.exitcodes.EX_GENERIC

                ret['retcode'] = retcode
                ret['success'] = retcode == salt.defaults.exitcodes.EX_OK
            except CommandNotFoundError as exc:
                msg = 'Command required for \'{0}\' not found'.format(
                    function_name
                )
                log.debug(msg, exc_info=True)
                ret['return'] = '{0}: {1}'.format(msg, exc)
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            except CommandExecutionError as exc:
                log.error(
                    'A command in \'%s\' had a problem: %s',
                    function_name, exc,
                    exc_info_on_loglevel=logging.DEBUG
                )
                ret['return'] = 'ERROR: {0}'.format(exc)
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            except SaltInvocationError as exc:
                log.error(
                    'Problem executing \'%s\': %s',
                    function_name, exc,
                    exc_info_on_loglevel=logging.DEBUG
                )
                ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
                    function_name, exc
                )
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            except TypeError as exc:
                # bad args passed to the module function; include its docstring
                msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(
                    function_name, exc, func.__doc__ or ''
                )
                log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
                ret['return'] = msg
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            except Exception:
                # catch-all: report traceback back to the master as the return
                msg = 'The minion function caused an exception'
                log.warning(msg, exc_info_on_loglevel=True)
                salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
                ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        else:
            # unknown function: return doc/suggestion text instead of running
            docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name))
            if docs:
                docs[function_name] = minion_instance.functions.missing_fun_string(function_name)
                ret['return'] = docs
            else:
                ret['return'] = minion_instance.functions.missing_fun_string(function_name)
                mod_name =
function_name.split('.')[0]
                if mod_name in minion_instance.function_errors:
                    ret['return'] += ' Possible reasons: \'{0}\''.format(
                        minion_instance.function_errors[mod_name]
                    )
            ret['success'] = False
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            ret['out'] = 'nested'

        ret['jid'] = data['jid']
        ret['fun'] = data['fun']
        ret['fun_args'] = data['arg']
        if 'master_id' in data:
            ret['master_id'] = data['master_id']
        if 'metadata' in data:
            if isinstance(data['metadata'], dict):
                ret['metadata'] = data['metadata']
            else:
                log.warning('The metadata parameter must be a dictionary. Ignoring.')
        if minion_instance.connected:
            minion_instance._return_pub(
                ret,
                timeout=minion_instance._return_retry_timer()
            )

        # Add default returners from minion config
        # Should have been coverted to comma-delimited string already
        if isinstance(opts.get('return'), six.string_types):
            if data['ret']:
                data['ret'] = ','.join((data['ret'], opts['return']))
            else:
                data['ret'] = opts['return']

        log.debug('minion return: %s', ret)
        # TODO: make a list? Seems odd to split it this late :/
        if data['ret'] and isinstance(data['ret'], six.string_types):
            if 'ret_config' in data:
                ret['ret_config'] = data['ret_config']
            if 'ret_kwargs' in data:
                ret['ret_kwargs'] = data['ret_kwargs']
            ret['id'] = opts['id']
            for returner in set(data['ret'].split(',')):
                try:
                    returner_str = '{0}.returner'.format(returner)
                    if returner_str in minion_instance.returners:
                        minion_instance.returners[returner_str](ret)
                    else:
                        returner_err = minion_instance.returners.missing_fun_string(returner_str)
                        log.error(
                            'Returner %s could not be loaded: %s',
                            returner_str, returner_err
                        )
                except Exception as exc:
                    log.exception(
                        'The return failed for job %s: %s', data['jid'], exc
                    )

    @classmethod
    def _thread_multi_return(cls, minion_instance, opts, data):
        '''
        This method should be used as a threading target, start the actual
        minion side execution.

        Like _thread_return, but ``data['fun']`` / ``data['arg']`` are
        parallel lists of functions and argument lists to run in sequence.
        '''
        fn_ = os.path.join(minion_instance.proc_dir, data['jid'])

        if opts['multiprocessing'] and not salt.utils.platform.is_windows():
            # Shutdown the multiprocessing before daemonizing
            salt.log.setup.shutdown_multiprocessing_logging()

            salt.utils.process.daemonize_if(opts)

            # Reconfigure multiprocessing logging after daemonizing
            salt.log.setup.setup_multiprocessing_logging()

        salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))

        sdata = {'pid': os.getpid()}
        sdata.update(data)
        log.info('Starting a new job with PID %s', sdata['pid'])
        with salt.utils.files.fopen(fn_, 'w+b') as fp_:
            fp_.write(minion_instance.serial.dumps(sdata))

        # ordered mode keys results by index; otherwise by function name
        multifunc_ordered = opts.get('multifunc_ordered', False)
        num_funcs = len(data['fun'])
        if multifunc_ordered:
            ret = {
                'return': [None] * num_funcs,
                'retcode': [None] * num_funcs,
                'success': [False] * num_funcs
            }
        else:
            ret = {
                'return': {},
                'retcode': {},
                'success': {}
            }

        for ind in range(0, num_funcs):
            if not multifunc_ordered:
                ret['success'][data['fun'][ind]] = False
            try:
                minion_blackout_violation = False
                if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
                    whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
                    # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                    if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
                        minion_blackout_violation = True
                elif minion_instance.opts['grains'].get('minion_blackout', False):
                    whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
                    if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
                        minion_blackout_violation = True
                if minion_blackout_violation:
                    raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                              'to False in pillar or grains to resume operations. Only '
                                              'saltutil.refresh_pillar allowed in blackout mode.')

                func = minion_instance.functions[data['fun'][ind]]

                args, kwargs = load_args_and_kwargs(
                    func,
                    data['arg'][ind],
                    data)
                minion_instance.functions.pack['__context__']['retcode'] = 0
                key = ind if multifunc_ordered else data['fun'][ind]
                ret['return'][key] = func(*args, **kwargs)
                retcode = minion_instance.functions.pack['__context__'].get(
                    'retcode',
                    0
                )
                if retcode == 0:
                    # No nonzero retcode in __context__ dunder. Check if return
                    # is a dictionary with a "result" or "success" key.
                    try:
                        func_result = all(ret['return'][key].get(x, True)
                                          for x in ('result', 'success'))
                    except Exception:
                        # return data is not a dict
                        func_result = True
                    if not func_result:
                        retcode = 1

                ret['retcode'][key] = retcode
                ret['success'][key] = retcode == 0
            except Exception as exc:
                # a failing function does not abort the remaining ones;
                # its traceback becomes its return value
                trb = traceback.format_exc()
                log.warning('The minion function caused an exception: %s', exc)
                if multifunc_ordered:
                    ret['return'][ind] = trb
                else:
                    ret['return'][data['fun'][ind]] = trb
        ret['jid'] = data['jid']
        ret['fun'] = data['fun']
        ret['fun_args'] = data['arg']
        if 'metadata' in data:
            ret['metadata'] = data['metadata']
        if minion_instance.connected:
            minion_instance._return_pub(
                ret,
                timeout=minion_instance._return_retry_timer()
            )
        if data['ret']:
            if 'ret_config' in data:
                ret['ret_config'] = data['ret_config']
            if 'ret_kwargs' in data:
                ret['ret_kwargs'] = data['ret_kwargs']
            for returner in set(data['ret'].split(',')):
                ret['id'] = opts['id']
                try:
                    minion_instance.returners['{0}.returner'.format(
                        returner
                    )](ret)
                except Exception as exc:
                    log.error(
                        'The return failed for job %s: %s',
                        data['jid'], exc
                    )

    def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True):
        '''
        Return the data from the executed command to the master server

        :param ret: job return dict (jid/fun/return/...)
        :param ret_cmd: master-side command, '_return' or '_syndic_return'
        :param sync: send synchronously when True, fire-and-forget otherwise
        '''
        jid = ret.get('jid', ret.get('__jid__'))
        fun = ret.get('fun', ret.get('__fun__'))
        if self.opts['multiprocessing']:
            # job finished: drop its proc_dir record
            fn_ = os.path.join(self.proc_dir, jid)
            if os.path.isfile(fn_):
                try:
                    os.remove(fn_)
                except (OSError, IOError):
                    # The file is gone already
                    pass
        log.info('Returning information for job: %s', jid)
        log.trace('Return data: %s', ret)
        if ret_cmd == '_syndic_return':
            load = {'cmd': ret_cmd,
                    'id': self.opts['uid'],
                    'jid': jid,
                    'fun': fun,
                    'arg': ret.get('arg'),
                    'tgt': ret.get('tgt'),
                    'tgt_type': ret.get('tgt_type'),
                    'load': ret.get('__load__')}
            if '__master_id__' in ret:
                load['master_id'] = ret['__master_id__']
            load['return'] = {}
            for key, value in six.iteritems(ret):
                if key.startswith('__'):
                    continue
                load['return'][key] = value
        else:
            load = {'cmd': ret_cmd,
                    'id': self.opts['id']}
            for key, value in six.iteritems(ret):
                load[key] = value

        if 'out' in ret:
            if isinstance(ret['out'], six.string_types):
                load['out'] = ret['out']
            else:
                log.error(
                    'Invalid outputter %s. This is likely a bug.',
                    ret['out']
                )
        else:
            try:
                oput = self.functions[fun].__outputter__
            except (KeyError, AttributeError, TypeError):
                pass
            else:
                if isinstance(oput, six.string_types):
                    load['out'] = oput
        if self.opts['cache_jobs']:
            # Local job cache has been enabled
            if ret['jid'] == 'req':
                ret['jid'] = salt.utils.jid.gen_jid(self.opts)
            salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret)

        if not self.opts['pub_ret']:
            return ''

        def timeout_handler(*_):
            log.warning(
               'The minion failed to return the job information for job %s. '
               'This is often due to the master being shut down or '
               'overloaded. If the master is running, consider increasing '
               'the worker_threads value.', jid
            )
            return True

        if sync:
            try:
                ret_val = self._send_req_sync(load, timeout=timeout)
            except SaltReqTimeoutError:
                timeout_handler()
                return ''
        else:
            with tornado.stack_context.ExceptionStackContext(timeout_handler):
                ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg

        log.trace('ret_val = %s', ret_val)  # pylint: disable=no-member
        return ret_val

    def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True):
        '''
        Return the data from the executed command to the master server

        Batched variant of _return_pub: accepts one return dict or a list of
        them and sends a single aggregated load keyed by jid.
        '''
        if not isinstance(rets, list):
            rets = [rets]
        jids = {}
        for ret in rets:
            jid = ret.get('jid', ret.get('__jid__'))
            fun = ret.get('fun', ret.get('__fun__'))
            if self.opts['multiprocessing']:
                fn_ = os.path.join(self.proc_dir, jid)
                if os.path.isfile(fn_):
                    try:
                        os.remove(fn_)
                    except (OSError, IOError):
                        # The file is gone already
                        pass
            log.info('Returning information for job: %s', jid)
            load = jids.setdefault(jid, {})
            if ret_cmd == '_syndic_return':
                if not load:
                    load.update({'id': self.opts['id'],
                                 'jid': jid,
                                 'fun': fun,
                                 'arg': ret.get('arg'),
                                 'tgt': ret.get('tgt'),
                                 'tgt_type': ret.get('tgt_type'),
                                 'load': ret.get('__load__'),
                                 'return': {}})
                if '__master_id__' in ret:
                    load['master_id'] = ret['__master_id__']
                for key, value in six.iteritems(ret):
                    if key.startswith('__'):
                        continue
                    load['return'][key] = value
            else:
                load.update({'id': self.opts['id']})
                for key, value in six.iteritems(ret):
                    load[key] = value
            if 'out' in ret:
                if isinstance(ret['out'], six.string_types):
                    load['out'] = ret['out']
                else:
                    log.error(
                        'Invalid outputter %s. This is likely a bug.',
                        ret['out']
                    )
            else:
                try:
                    oput = self.functions[fun].__outputter__
                except (KeyError, AttributeError, TypeError):
                    pass
                else:
                    if isinstance(oput, six.string_types):
                        load['out'] = oput
            if self.opts['cache_jobs']:
                # Local job cache has been enabled
                salt.utils.minion.cache_jobs(self.opts, load['jid'], ret)

        load = {'cmd': ret_cmd,
                'load': list(six.itervalues(jids))}

        def timeout_handler(*_):
            log.warning(
               'The minion failed to return the job information for job %s. '
               'This is often due to the master being shut down or '
               'overloaded. If the master is running, consider increasing '
               'the worker_threads value.', jid
            )
            return True

        if sync:
            try:
                ret_val = self._send_req_sync(load, timeout=timeout)
            except SaltReqTimeoutError:
                timeout_handler()
                return ''
        else:
            with tornado.stack_context.ExceptionStackContext(timeout_handler):
                ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg

        log.trace('ret_val = %s', ret_val)  # pylint: disable=no-member
        return ret_val

    def _state_run(self):
        '''
        Execute a state run based on information set in the minion config file
        '''
        if self.opts['startup_states']:
            if self.opts.get('master_type', 'str') == 'disable' and \
                    self.opts.get('file_client', 'remote') == 'remote':
                log.warning(
                    'Cannot run startup_states when \'master_type\' is set '
                    'to \'disable\' and \'file_client\' is set to '
                    '\'remote\'. Skipping.'
) else: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data) def _refresh_grains_watcher(self, refresh_interval_in_minutes): ''' Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: :return: None ''' if '__update_grains' not in self.opts.get('schedule', {}): if 'schedule' not in self.opts: self.opts['schedule'] = {} self.opts['schedule'].update({ '__update_grains': { 'function': 'event.fire', 'args': [{}, 'grains_refresh'], 'minutes': refresh_interval_in_minutes } }) def _fire_master_minion_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to False in Sodium release. self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'minion_start' ) # send name spaced event self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'minion'), ) def module_refresh(self, force_refresh=False, notify=False): ''' Refresh the functions and returners. ''' log.debug('Refreshing modules. Notify=%s', notify) self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify) self.schedule.functions = self.functions self.schedule.returners = self.returners def beacons_refresh(self): ''' Refresh the functions and returners. 
''' log.debug('Refreshing beacons.') self.beacons = salt.beacons.Beacon(self.opts, self.functions) def matchers_refresh(self): ''' Refresh the matchers ''' log.debug('Refreshing matchers.') self.matchers = salt.loader.matchers(self.opts) # TODO: only allow one future in flight at a time? @tornado.gen.coroutine def pillar_refresh(self, force_refresh=False, notify=False): ''' Refresh the pillar ''' if self.connected: log.debug('Refreshing pillar. Notify: %s', notify) async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ) try: self.opts['pillar'] = yield async_pillar.compile_pillar() if notify: evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE) except SaltClientError: # Do not exit if a pillar refresh fails. log.error('Pillar data could not be refreshed. ' 'One or more masters may be down!') finally: async_pillar.destroy() self.module_refresh(force_refresh, notify) self.matchers_refresh() self.beacons_refresh() def manage_schedule(self, tag, data): ''' Refresh the functions and returners. 
'''
        func = data.get('func', None)
        name = data.get('name', None)
        schedule = data.get('schedule', None)
        where = data.get('where', None)
        persist = data.get('persist', None)

        # Dispatch table: event 'func' -> (Schedule method name, args)
        funcs = {'delete': ('delete_job', (name, persist)),
                 'add': ('add_job', (schedule, persist)),
                 'modify': ('modify_job', (name, schedule, persist)),
                 'enable': ('enable_schedule', ()),
                 'disable': ('disable_schedule', ()),
                 'enable_job': ('enable_job', (name, persist)),
                 'disable_job': ('disable_job', (name, persist)),
                 'postpone_job': ('postpone_job', (name, data)),
                 'skip_job': ('skip_job', (name, data)),
                 'reload': ('reload', (schedule,)),
                 'list': ('list', (where,)),
                 'save_schedule': ('save_schedule', ()),
                 'get_next_fire_time': ('get_next_fire_time', (name,))}

        # Call the appropriate schedule function
        try:
            # funcs.get(func) is None for an unknown func, so the tuple
            # unpacking raises TypeError and lands in the handler below
            alias, params = funcs.get(func)
            getattr(self.schedule, alias)(*params)
        except TypeError:
            log.error('Function "%s" is unavailable in salt.utils.scheduler', func)

    def manage_beacons(self, tag, data):
        '''
        Manage Beacons
        '''
        func = data.get('func', None)
        name = data.get('name', None)
        beacon_data = data.get('beacon_data', None)
        include_pillar = data.get('include_pillar', None)
        include_opts = data.get('include_opts', None)

        # Dispatch table: event 'func' -> (Beacon method name, args)
        funcs = {'add': ('add_beacon', (name, beacon_data)),
                 'modify': ('modify_beacon', (name, beacon_data)),
                 'delete': ('delete_beacon', (name,)),
                 'enable': ('enable_beacons', ()),
                 'disable': ('disable_beacons', ()),
                 'enable_beacon': ('enable_beacon', (name,)),
                 'disable_beacon': ('disable_beacon', (name,)),
                 'list': ('list_beacons', (include_opts, include_pillar)),
                 'list_available': ('list_available_beacons', ()),
                 'validate_beacon': ('validate_beacon', (name, beacon_data)),
                 'reset': ('reset', ())}

        # Call the appropriate beacon function
        try:
            alias, params = funcs.get(func)
            getattr(self.beacons, alias)(*params)
        except AttributeError:
            log.error('Function "%s" is unavailable in salt.beacons', func)
        except TypeError as exc:
            log.info(
                'Failed to handle %s with data(%s). Error: %s',
                tag, data, exc,
                exc_info_on_loglevel=logging.DEBUG
            )

    def environ_setenv(self, tag, data):
        '''
        Set the salt-minion main process environment according to
        the data contained in the minion event data
        '''
        environ = data.get('environ', None)
        if environ is None:
            return False
        false_unsets = data.get('false_unsets', False)
        clear_all = data.get('clear_all', False)
        # deferred import to avoid a module-level cycle with salt.modules
        import salt.modules.environ as mod_environ
        return mod_environ.setenv(environ, false_unsets, clear_all)

    def _pre_tune(self):
        '''
        Set the minion running flag and issue the appropriate warnings if
        the minion cannot be started or is already running
        '''
        if self._running is None:
            self._running = True
        elif self._running is False:
            log.error(
                'This %s was scheduled to stop. Not running %s.tune_in()',
                self.__class__.__name__, self.__class__.__name__
            )
            return
        elif self._running is True:
            log.error(
                'This %s is already running. Not running %s.tune_in()',
                self.__class__.__name__, self.__class__.__name__
            )
            return

        try:
            log.info(
                '%s is starting as user \'%s\'',
                self.__class__.__name__, salt.utils.user.get_user()
            )
        except Exception as err:
            # Only windows is allowed to fail here. See #3189. Log as debug in
            # that case. Else, error.
            log.log(
                salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR,
                'Failed to get the user who is starting %s',
                self.__class__.__name__,
                exc_info=err
            )

    def _mine_send(self, tag, data):
        '''
        Send mine data to the master
        '''
        channel = salt.transport.client.ReqChannel.factory(self.opts)
        data['tok'] = self.tok
        try:
            ret = channel.send(data)
            return ret
        except SaltReqTimeoutError:
            # best-effort: a timeout is logged, not raised
            log.warning('Unable to send mine data to master.')
            return None
        finally:
            channel.close()

    def _handle_tag_module_refresh(self, tag, data):
        '''
        Handle a module_refresh event
        '''
        self.module_refresh(
            force_refresh=data.get('force_refresh', False),
            notify=data.get('notify', False)
        )

    @tornado.gen.coroutine
    def _handle_tag_pillar_refresh(self, tag, data):
        '''
        Handle a pillar_refresh event
        '''
        yield self.pillar_refresh(
            force_refresh=data.get('force_refresh', False),
            notify=data.get('notify', False)
        )

    def _handle_tag_beacons_refresh(self, tag, data):
        '''
        Handle a beacon_refresh event
        '''
        self.beacons_refresh()

    def _handle_tag_matchers_refresh(self, tag, data):
        '''
        Handle a matchers_refresh event
        '''
        self.matchers_refresh()

    def _handle_tag_manage_schedule(self, tag, data):
        '''
        Handle a manage_schedule event
        '''
        self.manage_schedule(tag, data)

    def _handle_tag_manage_beacons(self, tag, data):
        '''
        Handle a manage_beacons event
        '''
        self.manage_beacons(tag, data)

    def _handle_tag_grains_refresh(self, tag, data):
        '''
        Handle a grains_refresh event
        '''
        if (data.get('force_refresh', False) or
                self.grains_cache != self.opts['grains']):
            self.pillar_refresh(force_refresh=True)
            self.grains_cache = self.opts['grains']

    def _handle_tag_environ_setenv(self, tag, data):
        '''
        Handle a environ_setenv event
        '''
        self.environ_setenv(tag, data)

    def _handle_tag_minion_mine(self, tag, data):
        '''
        Handle a _minion_mine event
        '''
        self._mine_send(tag, data)

    def _handle_tag_fire_master(self, tag, data):
        '''
        Handle a fire_master event
        '''
        if self.connected:
            log.debug('Forwarding master event tag=%s', data['tag'])
self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])

    def _handle_tag_master_disconnected_failback(self, tag, data):
        '''
        Handle a master_disconnected_failback event
        '''
        # if the master disconnect event is for a different master, raise an exception
        if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']:
            # not mine master, ignore
            return
        if tag.startswith(master_event(type='failback')):
            # if the master failback event is not for the top master, raise an exception
            if data['master'] != self.opts['master_list'][0]:
                raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format(
                    data['master'], self.opts['master']))
            # if the master failback event is for the current master, raise an exception
            elif data['master'] == self.opts['master'][0]:
                raise SaltException('Already connected to \'{0}\''.format(data['master']))

        if self.connected:
            # we are not connected anymore
            self.connected = False
            log.info('Connection to master %s lost', self.opts['master'])

            # we can't use the config default here because the default '0' value is overloaded
            # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up
            # these jobs
            master_alive_interval = self.opts['master_alive_interval'] or 60

            if self.opts['master_type'] != 'failover':
                # modify the scheduled job to fire on reconnect
                if self.opts['transport'] != 'tcp':
                    schedule = {
                        'function': 'status.master',
                        'seconds': master_alive_interval,
                        'jid_include': True,
                        'maxrunning': 1,
                        'return_job': False,
                        'kwargs': {'master': self.opts['master'],
                                   'connected': False}
                    }
                    self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
                                             schedule=schedule)
            else:
                # delete the scheduled job to don't interfere with the failover process
                if self.opts['transport'] != 'tcp':
                    self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True)

                log.info('Trying to tune in to next master from master-list')

                if hasattr(self, 'pub_channel'):
                    # Detach and tear down the old channel before failing over
                    self.pub_channel.on_recv(None)
                    if hasattr(self.pub_channel, 'auth'):
                        self.pub_channel.auth.invalidate()
                    if hasattr(self.pub_channel, 'close'):
                        self.pub_channel.close()
                    del self.pub_channel

                # if eval_master finds a new master for us, self.connected
                # will be True again on successful master authentication
                # NOTE(review): this `yield` makes the handler a generator;
                # confirm the caller drives it (handle_event invokes handlers
                # without yielding).
                try:
                    master, self.pub_channel = yield self.eval_master(
                        opts=self.opts,
                        failed=True,
                        failback=tag.startswith(master_event(type='failback')))
                except SaltClientError:
                    pass

                if self.connected:
                    self.opts['master'] = master

                    # re-init the subsystems to work with the new master
                    log.info(
                        'Re-initialising subsystems for new master %s',
                        self.opts['master']
                    )
                    # put the current schedule into the new loaders
                    self.opts['schedule'] = self.schedule.option('schedule')
                    self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
                    # make the schedule to use the new 'functions' loader
                    self.schedule.functions = self.functions
                    self.pub_channel.on_recv(self._handle_payload)
                    self._fire_master_minion_start()
                    log.info('Minion is ready to receive requests!')

                    # update scheduled job to run with the new master addr
                    if self.opts['transport'] != 'tcp':
                        schedule = {
                            'function': 'status.master',
                            'seconds': master_alive_interval,
                            'jid_include': True,
                            'maxrunning': 1,
                            'return_job': False,
                            'kwargs': {'master': self.opts['master'],
                                       'connected': True}
                        }
                        self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
                                                 schedule=schedule)

                        if self.opts['master_failback'] and 'master_list' in self.opts:
                            if self.opts['master'] != self.opts['master_list'][0]:
                                # keep pinging the top master so we can fail back
                                schedule = {
                                    'function': 'status.ping_master',
                                    'seconds': self.opts['master_failback_interval'],
                                    'jid_include': True,
                                    'maxrunning': 1,
                                    'return_job': False,
                                    'kwargs': {'master': self.opts['master_list'][0]}
                                }
                                self.schedule.modify_job(name=master_event(type='failback'),
                                                         schedule=schedule)
                            else:
                                self.schedule.delete_job(name=master_event(type='failback'), persist=True)
                else:
                    # no master could be reached; restart the io loop
                    self.restart = True
                    self.io_loop.stop()

    def _handle_tag_master_connected(self, tag, data):
        '''
        Handle a master_connected event
        '''
        # handle this event only once. otherwise it will pollute the log
        # also if master type is failover all the reconnection work is done
        # by `disconnected` event handler and this event must never happen,
        # anyway check it to be sure
        if not self.connected and self.opts['master_type'] != 'failover':
            log.info('Connection to master %s re-established', self.opts['master'])
            self.connected = True
            # modify the __master_alive job to only fire,
            # if the connection is lost again
            if self.opts['transport'] != 'tcp':
                if self.opts['master_alive_interval'] > 0:
                    schedule = {
                        'function': 'status.master',
                        'seconds': self.opts['master_alive_interval'],
                        'jid_include': True,
                        'maxrunning': 1,
                        'return_job': False,
                        'kwargs': {'master': self.opts['master'],
                                   'connected': True}
                    }
                    self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
                                             schedule=schedule)
                else:
                    self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True)

    def _handle_tag_schedule_return(self, tag, data):
        '''
        Handle a _schedule_return event
        '''
        # reporting current connection with master
        if data['schedule'].startswith(master_event(type='alive', master='')):
            if data['return']:
                log.debug(
                    'Connected to master %s',
                    data['schedule'].split(master_event(type='alive', master=''))[1]
                )
        self._return_pub(data, ret_cmd='_return', sync=False)

    def _handle_tag_salt_error(self, tag, data):
        '''
        Handle a _salt_error event
        '''
        if self.connected:
            log.debug('Forwarding salt error event tag=%s', tag)
            self._fire_master(data, tag)

    def _handle_tag_salt_auth_creds(self, tag, data):
        '''
        Handle a salt_auth_creds event
        '''
        key = tuple(data['key'])
        log.debug(
            'Updating auth data for %s: %s -> %s',
            key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds']
        )
        salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds']

    @tornado.gen.coroutine
    def handle_event(self, package):
'''
        Handle an event from the epull_sock (all local minion events)
        '''
        if not self.ready:
            raise tornado.gen.Return()
        tag, data = salt.utils.event.SaltEvent.unpack(package)
        log.debug(
            'Minion of \'%s\' is handling event tag \'%s\'',
            self.opts['master'], tag
        )

        # Map of tag prefix -> handler; matching is by str.startswith below
        tag_functions = {
            'beacons_refresh': self._handle_tag_beacons_refresh,
            'environ_setenv': self._handle_tag_environ_setenv,
            'fire_master': self._handle_tag_fire_master,
            'grains_refresh': self._handle_tag_grains_refresh,
            'matchers_refresh': self._handle_tag_matchers_refresh,
            'manage_schedule': self._handle_tag_manage_schedule,
            'manage_beacons': self._handle_tag_manage_beacons,
            '_minion_mine': self._handle_tag_minion_mine,
            'module_refresh': self._handle_tag_module_refresh,
            'pillar_refresh': self._handle_tag_pillar_refresh,
            'salt/auth/creds': self._handle_tag_salt_auth_creds,
            '_salt_error': self._handle_tag_salt_error,
            '__schedule_return': self._handle_tag_schedule_return,
            master_event(type='disconnected'): self._handle_tag_master_disconnected_failback,
            master_event(type='failback'): self._handle_tag_master_disconnected_failback,
            master_event(type='connected'): self._handle_tag_master_connected,
        }

        # Run the appropriate function
        # NOTE(review): every prefix that matches fires its handler, so a tag
        # matching more than one prefix runs more than one handler.
        for tag_function in tag_functions:
            if tag.startswith(tag_function):
                tag_functions[tag_function](tag, data)

    def _fallback_cleanups(self):
        '''
        Fallback cleanup routines, attempting to fix leaked processes, threads, etc.
        '''
        # Add an extra fallback in case a forked process leaks through
        multiprocessing.active_children()

        # Cleanup Windows threads
        if not salt.utils.platform.is_windows():
            return
        for thread in self.win_proc:
            if not thread.is_alive():
                thread.join()
                try:
                    self.win_proc.remove(thread)
                    del thread
                except (ValueError, NameError):
                    pass

    def _setup_core(self):
        '''
        Set up the core minion attributes.
        This is safe to call multiple times.
        '''
        if not self.ready:
            # First call. Initialize.
            self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
            self.serial = salt.payload.Serial(self.opts)
            self.mod_opts = self._prep_mod_opts()
            # self.matcher = Matcher(self.opts, self.functions)
            self.matchers = salt.loader.matchers(self.opts)
            self.beacons = salt.beacons.Beacon(self.opts, self.functions)
            uid = salt.utils.user.get_uid(user=self.opts.get('user', None))
            self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
            self.grains_cache = self.opts['grains']
            self.ready = True

    def setup_beacons(self, before_connect=False):
        '''
        Set up the beacons.
        This is safe to call multiple times.
        '''
        self._setup_core()

        loop_interval = self.opts['loop_interval']
        new_periodic_callbacks = {}

        if 'beacons' not in self.periodic_callbacks:
            self.beacons = salt.beacons.Beacon(self.opts, self.functions)

            def handle_beacons():
                # Process Beacons
                beacons = None
                try:
                    beacons = self.process_beacons(self.functions)
                except Exception:
                    log.critical('The beacon errored: ', exc_info=True)
                if beacons and self.connected:
                    self._fire_master(events=beacons)

            new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(
                handle_beacons, loop_interval * 1000)
            if before_connect:
                # Make sure there is a chance for one iteration to occur before connect
                handle_beacons()

        if 'cleanup' not in self.periodic_callbacks:
            new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(
                self._fallback_cleanups, loop_interval * 1000)

        # start all the other callbacks
        for periodic_cb in six.itervalues(new_periodic_callbacks):
            periodic_cb.start()

        self.periodic_callbacks.update(new_periodic_callbacks)

    def setup_scheduler(self, before_connect=False):
        '''
        Set up the scheduler.
        This is safe to call multiple times.
        '''
        self._setup_core()

        loop_interval = self.opts['loop_interval']
        new_periodic_callbacks = {}

        if 'schedule' not in self.periodic_callbacks:
            if 'schedule' not in self.opts:
                self.opts['schedule'] = {}
            if not hasattr(self, 'schedule'):
                self.schedule = salt.utils.schedule.Schedule(
                    self.opts,
                    self.functions,
                    self.returners,
                    utils=self.utils,
                    cleanup=[master_event(type='alive')])

            try:
                if self.opts['grains_refresh_every']:  # In minutes, not seconds!
                    log.debug(
                        'Enabling the grains refresher. Will run every %d minute(s).',
                        self.opts['grains_refresh_every']
                    )
                    self._refresh_grains_watcher(abs(self.opts['grains_refresh_every']))
            except Exception as exc:
                log.error(
                    'Exception occurred in attempt to initialize grain refresh '
                    'routine during minion tune-in: %s', exc
                )

            # TODO: actually listen to the return and change period
            def handle_schedule():
                self.process_schedule(self, loop_interval)
            # schedule is polled every second regardless of loop_interval
            new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000)

            if before_connect:
                # Make sure there is a chance for one iteration to occur before connect
                handle_schedule()

        if 'cleanup' not in self.periodic_callbacks:
            new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(
                self._fallback_cleanups, loop_interval * 1000)

        # start all the other callbacks
        for periodic_cb in six.itervalues(new_periodic_callbacks):
            periodic_cb.start()

        self.periodic_callbacks.update(new_periodic_callbacks)

    # Main Minion Tune In
    def tune_in(self, start=True):
        '''
        Lock onto the publisher.
This is the main event loop for the minion

        :rtype : None
        '''
        self._pre_tune()

        log.debug('Minion \'%s\' trying to tune in', self.opts['id'])

        if start:
            if self.opts.get('beacons_before_connect', False):
                self.setup_beacons(before_connect=True)
            if self.opts.get('scheduler_before_connect', False):
                self.setup_scheduler(before_connect=True)
            self.sync_connect_master()
        if self.connected:
            self._fire_master_minion_start()
            log.info('Minion is ready to receive requests!')

        # Make sure to gracefully handle SIGUSR1
        enable_sigusr1_handler()

        # Make sure to gracefully handle CTRL_LOGOFF_EVENT
        if HAS_WIN_FUNCTIONS:
            salt.utils.win_functions.enable_ctrl_logoff_handler()

        # On first startup execute a state run if configured to do so
        self._state_run()

        self.setup_beacons()
        self.setup_scheduler()

        # schedule the stuff that runs every interval
        ping_interval = self.opts.get('ping_interval', 0) * 60
        if ping_interval > 0 and self.connected:
            def ping_master():
                try:
                    def ping_timeout_handler(*_):
                        if self.opts.get('auth_safemode', False):
                            log.error('** Master Ping failed. Attempting to restart minion**')
                            delay = self.opts.get('random_reauth_delay', 5)
                            log.info('delaying random_reauth_delay %ss', delay)
                            try:
                                self.functions['service.restart'](service_name())
                            except KeyError:
                                # Probably no init system (running in docker?)
                                log.warning(
                                    'ping_interval reached without response '
                                    'from the master, but service.restart '
                                    'could not be run to restart the minion '
                                    'daemon. ping_interval requires that the '
                                    'minion is running under an init system.'
                                )

                    self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler)
                except Exception:
                    log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG)
            self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000)
            self.periodic_callbacks['ping'].start()

        # add handler to subscriber
        if hasattr(self, 'pub_channel') and self.pub_channel is not None:
            self.pub_channel.on_recv(self._handle_payload)
        elif self.opts.get('master_type') != 'disable':
            log.error('No connection to master found. Scheduled jobs will not run.')

        if start:
            try:
                self.io_loop.start()
                if self.restart:
                    self.destroy()
            except (KeyboardInterrupt, RuntimeError):  # A RuntimeError can be re-raised by Tornado on shutdown
                self.destroy()

    def _handle_payload(self, payload):
        # Dispatch an AES-encrypted publication to the job handler if it
        # targets this minion.
        if payload is not None and payload['enc'] == 'aes':
            if self._target_load(payload['load']):
                self._handle_decoded_payload(payload['load'])
            elif self.opts['zmq_filtering']:
                # In the filtering enabled case, we'd like to know when minion sees something it shouldnt
                log.trace(
                    'Broadcast message received not for this minion, Load: %s',
                    payload['load']
                )
        # If it's not AES, and thus has not been verified, we do nothing.
        # In the future, we could add support for some clearfuncs, but
        # the minion currently has no need.

    def _target_load(self, load):
        '''
        Return True if this publication load targets this minion.
        '''
        # Verify that the publication is valid
        if 'tgt' not in load or 'jid' not in load or 'fun' not in load \
                or 'arg' not in load:
            return False
        # Verify that the publication applies to this minion

        # It's important to note that the master does some pre-processing
        # to determine which minions to send a request to. So for example,
        # a "salt -G 'grain_key:grain_val' test.ping" will invoke some
        # pre-processing on the master and this minion should not see the
        # publication if the master does not determine that it should.
        if 'tgt_type' in load:
            match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None)
            if match_func is None:
                return False
            if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
                delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM)
                if not match_func(load['tgt'], delimiter=delimiter):
                    return False
            elif not match_func(load['tgt']):
                return False
        else:
            if not self.matchers['glob_match.match'](load['tgt']):
                return False

        return True

    def destroy(self):
        '''
        Tear down the minion
        '''
        if self._running is False:
            return
        self._running = False
        if hasattr(self, 'schedule'):
            del self.schedule
        if hasattr(self, 'pub_channel') and self.pub_channel is not None:
            self.pub_channel.on_recv(None)
            if hasattr(self.pub_channel, 'close'):
                self.pub_channel.close()
            del self.pub_channel
        if hasattr(self, 'periodic_callbacks'):
            for cb in six.itervalues(self.periodic_callbacks):
                cb.stop()

    def __del__(self):
        # Best-effort teardown when the object is garbage collected
        self.destroy()


class Syndic(Minion):
    '''
    Make a Syndic minion, this minion will use the minion keys on the
    master to authenticate with a higher level master.
    '''
    def __init__(self, opts, **kwargs):
        self._syndic_interface = opts.get('interface')
        self._syndic = True
        # force auth_safemode True because Syndic don't support autorestart
        opts['auth_safemode'] = True
        opts['loop_interval'] = 1
        super(Syndic, self).__init__(opts, **kwargs)
        self.mminion = salt.minion.MasterMinion(opts)
        self.jid_forward_cache = set()
        self.jids = {}
        self.raw_events = []
        self.pub_future = None

    def _handle_decoded_payload(self, data):
        '''
        Override this method if you wish to handle the decoded data
        differently.
        '''
        # TODO: even do this??
# decrement the hop counter before forwarding
        data['to'] = int(data.get('to', self.opts['timeout'])) - 1
        # Only forward the command if it didn't originate from ourselves
        if data.get('master_id', 0) != self.opts.get('master_id', 1):
            self.syndic_cmd(data)

    def syndic_cmd(self, data):
        '''
        Take the now clear load and forward it on to the client cmd
        '''
        # Set up default tgt_type
        if 'tgt_type' not in data:
            data['tgt_type'] = 'glob'
        kwargs = {}

        # optionally add a few fields to the publish data
        for field in ('master_id',  # which master the job came from
                      'user',  # which user ran the job
                      ):
            if field in data:
                kwargs[field] = data[field]

        def timeout_handler(*args):
            log.warning('Unable to forward pub data: %s', args[1])
            return True

        with tornado.stack_context.ExceptionStackContext(timeout_handler):
            self.local.pub_async(data['tgt'],
                                 data['fun'],
                                 data['arg'],
                                 data['tgt_type'],
                                 data['ret'],
                                 data['jid'],
                                 data['to'],
                                 io_loop=self.io_loop,
                                 callback=lambda _: None,
                                 **kwargs)

    def fire_master_syndic_start(self):
        # Send an event to the master that the minion is live
        if self.opts['enable_legacy_startup_events']:
            # Old style event. Defaults to false in Sodium release.
            self._fire_master(
                'Syndic {0} started at {1}'.format(
                    self.opts['id'],
                    time.asctime()
                ),
                'syndic_start',
                sync=False,
            )
        # namespaced event
        self._fire_master(
            'Syndic {0} started at {1}'.format(
                self.opts['id'],
                time.asctime()
            ),
            tagify([self.opts['id'], 'start'], 'syndic'),
            sync=False,
        )

    # TODO: clean up docs
    def tune_in_no_block(self):
        '''
        Executes the tune_in sequence but omits extra logging and the
        management of the event bus assuming that these are handled outside
        the tune_in sequence
        '''
        # Instantiate the local client
        self.local = salt.client.get_local_client(
            self.opts['_minion_conf_file'], io_loop=self.io_loop)

        # add handler to subscriber
        self.pub_channel.on_recv(self._process_cmd_socket)

    def _process_cmd_socket(self, payload):
        # Forward verified (AES) publications from the upstream master
        if payload is not None and payload['enc'] == 'aes':
            log.trace('Handling payload')
            self._handle_decoded_payload(payload['load'])
        # If it's not AES, and thus has not been verified, we do nothing.
        # In the future, we could add support for some clearfuncs, but
        # the syndic currently has no need.

    @tornado.gen.coroutine
    def reconnect(self):
        if hasattr(self, 'pub_channel'):
            self.pub_channel.on_recv(None)
            if hasattr(self.pub_channel, 'close'):
                self.pub_channel.close()
            del self.pub_channel

        # if eval_master finds a new master for us, self.connected
        # will be True again on successful master authentication
        master, self.pub_channel = yield self.eval_master(opts=self.opts)

        if self.connected:
            self.opts['master'] = master
            self.pub_channel.on_recv(self._process_cmd_socket)
            log.info('Minion is ready to receive requests!')

        raise tornado.gen.Return(self)

    def destroy(self):
        '''
        Tear down the syndic minion
        '''
        # We borrowed the local clients poller so give it back before
        # it's destroyed. Reset the local poller reference.
        super(Syndic, self).destroy()
        if hasattr(self, 'local'):
            del self.local

        if hasattr(self, 'forward_events'):
            self.forward_events.stop()


# TODO: need a way of knowing if the syndic connection is busted
class SyndicManager(MinionBase):
    '''
    Make a MultiMaster syndic minion, this minion will handle relaying jobs
    and returns from all minions connected to it to the list of masters it
    is connected to.

    Modes (controlled by `syndic_mode`:
        sync: This mode will synchronize all events and publishes from
        higher level masters
        cluster: This mode will only sync job publishes and returns

    Note: jobs will be returned best-effort to the requesting master. This also means
    (since we are using zmq) that if a job was fired and the master disconnects
    between the publish and return, that the return will end up in a zmq buffer
    in this Syndic headed to that original master.

    In addition, since these classes all seem to use a mix of blocking and non-blocking
    calls (with varying timeouts along the way) this daemon does not handle failure well,
    it will (under most circumstances) stall the daemon for ~15s trying to forward events
    to the down master
    '''
    # time to connect to upstream master
    SYNDIC_CONNECT_TIMEOUT = 5
    SYNDIC_EVENT_TIMEOUT = 5

    def __init__(self, opts, io_loop=None):
        opts['loop_interval'] = 1
        super(SyndicManager, self).__init__(opts)
        self.mminion = salt.minion.MasterMinion(opts)
        # sync (old behavior), cluster (only returns and publishes)
        self.syndic_mode = self.opts.get('syndic_mode', 'sync')
        self.syndic_failover = self.opts.get('syndic_failover', 'random')

        self.auth_wait = self.opts['acceptance_wait_time']
        self.max_auth_wait = self.opts['acceptance_wait_time_max']

        self._has_master = threading.Event()
        self.jid_forward_cache = set()

        if io_loop is None:
            install_zmq()
            self.io_loop = ZMQDefaultLoop.current()
        else:
            self.io_loop = io_loop

        # List of events
        self.raw_events = []
        # Dict of rets: {master_id: {event_tag: job_ret, ...}, ...}
        self.job_rets = {}
        # List of delayed job_rets which was
unable to send for some reason and will be resend to
        # any available master
        self.delayed = []
        # Active pub futures: {master_id: (future, [job_ret, ...]), ...}
        self.pub_futures = {}

    def _spawn_syndics(self):
        '''
        Spawn all the coroutines which will sign in the syndics
        '''
        self._syndics = OrderedDict()  # mapping of opts['master'] -> syndic
        masters = self.opts['master']
        if not isinstance(masters, list):
            masters = [masters]
        for master in masters:
            s_opts = copy.copy(self.opts)
            s_opts['master'] = master
            self._syndics[master] = self._connect_syndic(s_opts)

    @tornado.gen.coroutine
    def _connect_syndic(self, opts):
        '''
        Create a syndic, and asynchronously connect it to a master
        '''
        auth_wait = opts['acceptance_wait_time']
        failed = False
        while True:
            if failed:
                # back off between reconnect attempts, up to max_auth_wait
                if auth_wait < self.max_auth_wait:
                    auth_wait += self.auth_wait
                log.debug(
                    "sleeping before reconnect attempt to %s [%d/%d]",
                    opts['master'], auth_wait, self.max_auth_wait,
                )
                yield tornado.gen.sleep(auth_wait)  # TODO: log?
            log.debug(
                'Syndic attempting to connect to %s',
                opts['master']
            )
            try:
                syndic = Syndic(opts,
                                timeout=self.SYNDIC_CONNECT_TIMEOUT,
                                safe=False,
                                io_loop=self.io_loop,
                                )
                yield syndic.connect_master(failed=failed)
                # set up the syndic to handle publishes (specifically not event forwarding)
                syndic.tune_in_no_block()

                # Send an event to the master that the minion is live
                syndic.fire_master_syndic_start()

                log.info(
                    'Syndic successfully connected to %s',
                    opts['master']
                )
                break
            except SaltClientError as exc:
                failed = True
                log.error(
                    'Error while bringing up syndic for multi-syndic. Is the '
                    'master at %s responding?', opts['master']
                )
            except (KeyboardInterrupt, SystemExit):
                raise
            except Exception:
                failed = True
                log.critical(
                    'Unexpected error while connecting to %s',
                    opts['master'], exc_info=True
                )

        raise tornado.gen.Return(syndic)

    def _mark_master_dead(self, master):
        '''
        Mark a master as dead. This will start the sign-in routine
        '''
        # if its connected, mark it dead
        if self._syndics[master].done():
            syndic = self._syndics[master].result()  # pylint: disable=no-member
            self._syndics[master] = syndic.reconnect()
        else:
            # TODO: debug?
            log.info(
                'Attempting to mark %s as dead, although it is already '
                'marked dead', master
            )

    def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
        '''
        Wrapper to call a given func on a syndic, best effort to get the one you asked for
        '''
        if kwargs is None:
            kwargs = {}
        successful = False
        # Call for each master
        for master, syndic_future in self.iter_master_options(master_id):
            if not syndic_future.done() or syndic_future.exception():
                log.error(
                    'Unable to call %s on %s, that syndic is not connected',
                    func, master
                )
                continue

            try:
                getattr(syndic_future.result(), func)(*args, **kwargs)
                successful = True
            except SaltClientError:
                log.error(
                    'Unable to call %s on %s, trying another...',
                    func, master
                )
                self._mark_master_dead(master)
        if not successful:
            log.critical('Unable to call %s on any masters!', func)

    def _return_pub_syndic(self, values, master_id=None):
        '''
        Wrapper to call the '_return_pub_multi' a syndic, best effort to get the one you asked for
        '''
        func = '_return_pub_multi'
        for master, syndic_future in self.iter_master_options(master_id):
            if not syndic_future.done() or syndic_future.exception():
                log.error(
                    'Unable to call %s on %s, that syndic is not connected',
                    func, master
                )
                continue

            future, data = self.pub_futures.get(master, (None, None))
            if future is not None:
                if not future.done():
                    if master == master_id:
                        # Targeted master previous send not done yet, call again later
                        return False
                    else:
                        # Fallback master is busy, try the next one
                        continue
                elif future.exception():
                    # Previous execution on this master returned an error
                    log.error(
                        'Unable to call %s on %s, trying another...',
                        func, master
                    )
                    self._mark_master_dead(master)
                    del self.pub_futures[master]
                    # Add not sent data to the delayed list and try the next master
                    self.delayed.extend(data)
                    continue
            future = getattr(syndic_future.result(), func)(values,
                                                           '_syndic_return',
                                                           timeout=self._return_retry_timer(),
                                                           sync=False)
            self.pub_futures[master] = (future, values)
            return True
        # Loop done and didn't exit: wasn't sent, try again later
        return False

    def iter_master_options(self, master_id=None):
        '''
        Iterate (in order) over your options for master
        '''
        masters = list(self._syndics.keys())
        if self.opts['syndic_failover'] == 'random':
            shuffle(masters)
        # start with the requested master if known, otherwise the first option
        if master_id not in self._syndics:
            master_id = masters.pop(0)
        else:
            masters.remove(master_id)

        while True:
            yield master_id, self._syndics[master_id]
            if not masters:
                break
            master_id = masters.pop(0)

    def _reset_event_aggregation(self):
        # clear the buffers that _process_event fills between forwards
        self.job_rets = {}
        self.raw_events = []

    def reconnect_event_bus(self, something):
        # re-arm the event handler when the previous handler future resolves
        future = self.local.event.set_event_handler(self._process_event)
        self.io_loop.add_future(future, self.reconnect_event_bus)

    # Syndic Tune In
    def tune_in(self):
        '''
        Lock onto the publisher. This is the main event loop for the syndic
        '''
        self._spawn_syndics()
        # Instantiate the local client
        self.local = salt.client.get_local_client(
            self.opts['_minion_conf_file'], io_loop=self.io_loop)
        self.local.event.subscribe('')

        log.debug('SyndicManager \'%s\' trying to tune in', self.opts['id'])

        # register the event sub to the poller
        self.job_rets = {}
        self.raw_events = []
        self._reset_event_aggregation()
        future = self.local.event.set_event_handler(self._process_event)
        self.io_loop.add_future(future, self.reconnect_event_bus)

        # forward events every syndic_event_forward_timeout
        self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
                                                              self.opts['syndic_event_forward_timeout'] * 1000,
                                                              )
        self.forward_events.start()

        # Make sure to gracefully handle SIGUSR1
        enable_sigusr1_handler()

        self.io_loop.start()

    def _process_event(self, raw):
        # TODO: cleanup: Move down into event class
        mtag, data = self.local.event.unpack(raw, self.local.event.serial)
        log.trace('Got event %s', mtag)  # pylint:
disable=no-member tag_parts = mtag.split('/') if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \ salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \ 'return' in data: if 'jid' not in data: # Not a job return return if self.syndic_mode == 'cluster' and data.get('master_id', 0) == self.opts.get('master_id', 1): log.debug('Return received with matching master_id, not forwarding') return master = data.get('master_id') jdict = self.job_rets.setdefault(master, {}).setdefault(mtag, {}) if not jdict: jdict['__fun__'] = data.get('fun') jdict['__jid__'] = data['jid'] jdict['__load__'] = {} fstr = '{0}.get_load'.format(self.opts['master_job_cache']) # Only need to forward each load once. Don't hit the disk # for every minion return! if data['jid'] not in self.jid_forward_cache: jdict['__load__'].update( self.mminion.returners[fstr](data['jid']) ) self.jid_forward_cache.add(data['jid']) if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']: # Pop the oldest jid from the cache tmp = sorted(list(self.jid_forward_cache)) tmp.pop(0) self.jid_forward_cache = set(tmp) if master is not None: # __'s to make sure it doesn't print out on the master cli jdict['__master_id__'] = master ret = {} for key in 'return', 'retcode', 'success': if key in data: ret[key] = data[key] jdict[data['id']] = ret else: # TODO: config to forward these? 
If so we'll have to keep track of who # has seen them # if we are the top level masters-- don't forward all the minion events if self.syndic_mode == 'sync': # Add generic event aggregation here if 'retcode' not in data: self.raw_events.append({'data': data, 'tag': mtag}) def _forward_events(self): log.trace('Forwarding events') # pylint: disable=no-member if self.raw_events: events = self.raw_events self.raw_events = [] self._call_syndic('_fire_master', kwargs={'events': events, 'pretag': tagify(self.opts['id'], base='syndic'), 'timeout': self._return_retry_timer(), 'sync': False, }, ) if self.delayed: res = self._return_pub_syndic(self.delayed) if res: self.delayed = [] for master in list(six.iterkeys(self.job_rets)): values = list(six.itervalues(self.job_rets[master])) res = self._return_pub_syndic(values, master_id=master) if res: del self.job_rets[master] class ProxyMinionManager(MinionManager): ''' Create the multi-minion interface but for proxy minions ''' def _create_minion_object(self, opts, timeout, safe, io_loop=None, loaded_base_name=None, jid_queue=None): ''' Helper function to return the correct type of object ''' return ProxyMinion(opts, timeout, safe, io_loop=io_loop, loaded_base_name=loaded_base_name, jid_queue=jid_queue) def _metaproxy_call(opts, fn_name): metaproxy = salt.loader.metaproxy(opts) try: metaproxy_name = opts['metaproxy'] except KeyError: metaproxy_name = 'proxy' log.trace( 'No metaproxy key found in opts for id %s. ' 'Defaulting to standard proxy minion.', opts['id'] ) metaproxy_fn = metaproxy_name + '.' + fn_name return metaproxy[metaproxy_fn] class ProxyMinion(Minion): ''' This class instantiates a 'proxy' minion--a minion that does not manipulate the host it runs on, but instead manipulates a device that cannot run a minion. ''' # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. 
(since they need to know which master they connected to) If this function is changed, please check Minion._post_master_init to see if those changes need to be propagated. ProxyMinions need a significantly different post master setup, which is why the differences are not factored out into separate helper functions. ''' mp_call = _metaproxy_call(self.opts, 'post_master_init') return mp_call(self, master) def _target_load(self, load): ''' Verify that the publication is valid and applies to this minion ''' mp_call = _metaproxy_call(self.opts, 'target_load') return mp_call(self, load) def _handle_payload(self, payload): mp_call = _metaproxy_call(self.opts, 'handle_payload') return mp_call(self, payload) @tornado.gen.coroutine def _handle_decoded_payload(self, data): mp_call = _metaproxy_call(self.opts, 'handle_decoded_payload') return mp_call(self, data) @classmethod def _target(cls, minion_instance, opts, data, connected): mp_call = _metaproxy_call(opts, 'target') return mp_call(cls, minion_instance, opts, data, connected) @classmethod def _thread_return(cls, minion_instance, opts, data): mp_call = _metaproxy_call(opts, 'thread_return') return mp_call(cls, minion_instance, opts, data) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): mp_call = _metaproxy_call(opts, 'thread_multi_return') return mp_call(cls, minion_instance, opts, data) class SProxyMinion(SMinion): ''' Create an object that has loaded all of the minion module functions, grains, modules, returners etc. The SProxyMinion allows developers to generate all of the salt minion functions and present them with these functions for general use. ''' def gen_modules(self, initial_load=False): ''' Tell the minion to reload the execution modules CLI Example: .. 
code-block:: bash salt '*' sys.reload_modules ''' self.opts['grains'] = salt.loader.grains(self.opts) self.opts['pillar'] = salt.pillar.get_pillar( self.opts, self.opts['grains'], self.opts['id'], saltenv=self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ).compile_pillar() if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts: errmsg = ( 'No "proxy" configuration key found in pillar or opts ' 'dictionaries for id {id}. Check your pillar/options ' 'configuration and contents. Salt-proxy aborted.' ).format(id=self.opts['id']) log.error(errmsg) self._running = False raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg) if 'proxy' not in self.opts: self.opts['proxy'] = self.opts['pillar']['proxy'] # Then load the proxy module self.proxy = salt.loader.proxy(self.opts) self.utils = salt.loader.utils(self.opts, proxy=self.proxy) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=False, proxy=self.proxy) self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy) self.matchers = salt.loader.matchers(self.opts) self.functions['sys.reload_modules'] = self.gen_modules self.executors = salt.loader.executors(self.opts, self.functions, proxy=self.proxy) fq_proxyname = self.opts['proxy']['proxytype'] # we can then sync any proxymodules down from the master # we do a sync_all here in case proxy code was installed by # SPM or was manually placed in /srv/salt/_modules etc. 
self.functions['saltutil.sync_all'](saltenv=self.opts['saltenv']) self.functions.pack['__proxy__'] = self.proxy self.proxy.pack['__salt__'] = self.functions self.proxy.pack['__ret__'] = self.returners self.proxy.pack['__pillar__'] = self.opts['pillar'] # Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__ self.utils = salt.loader.utils(self.opts, proxy=self.proxy) self.proxy.pack['__utils__'] = self.utils # Reload all modules so all dunder variables are injected self.proxy.reload_modules() if ('{0}.init'.format(fq_proxyname) not in self.proxy or '{0}.shutdown'.format(fq_proxyname) not in self.proxy): errmsg = 'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname) + \ 'Check your proxymodule. Salt-proxy aborted.' log.error(errmsg) self._running = False raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg) self.module_executors = self.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])() proxy_init_fn = self.proxy[fq_proxyname + '.init'] proxy_init_fn(self.opts) self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy) # Sync the grains here so the proxy can communicate them to the master self.functions['saltutil.sync_grains'](saltenv='base') self.grains_cache = self.opts['grains'] self.ready = True
saltstack/salt
salt/minion.py
load_args_and_kwargs
python
def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):
    '''
    Detect the args and kwargs that need to be passed to a function call, and
    check them against what was passed.
    '''
    argspec = salt.utils.args.get_function_argspec(func)
    _args = []
    _kwargs = {}
    invalid_kwargs = []

    def _accepts(name):
        # The function either takes **kwargs or declares this exact
        # positional/keyword argument name.
        return argspec.keywords or name in argspec.args

    for arg in args:
        if isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
            # A dict flagged with __kwarg__ == True carries explicit kwargs:
            # accept each entry the function can take, record the rest as
            # invalid.
            for key, val in six.iteritems(arg):
                if _accepts(key):
                    _kwargs[key] = val
                else:
                    invalid_kwargs.append('{0}={1}'.format(key, val))
            continue

        # Otherwise the arg may be a 'key=value' style string kwarg; let the
        # CLI input parser split it out (condition=False keeps raw values).
        string_kwarg = salt.utils.args.parse_input([arg], condition=False)[1]  # pylint: disable=W0632
        if not string_kwarg:
            # Plain positional argument.
            _args.append(arg)
        elif _accepts(next(six.iterkeys(string_kwarg))):
            _kwargs.update(string_kwarg)
        else:
            # Parsed keyword is not accepted by the function and **kwargs is
            # not supported: record it as invalid.
            for key, val in six.iteritems(string_kwarg):
                invalid_kwargs.append('{0}={1}'.format(key, val))

    if invalid_kwargs and not ignore_invalid:
        # Raises SaltInvocationError listing the offending kwargs.
        salt.utils.args.invalid_kwargs(invalid_kwargs)

    if argspec.keywords and isinstance(data, dict):
        # This function accepts **kwargs, so pack in the publish data under
        # __pub_-prefixed keys.
        for key, val in six.iteritems(data):
            _kwargs['__pub_{0}'.format(key)] = val

    return _args, _kwargs
Detect the args and kwargs that need to be passed to a function call, and check them against what was passed.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L323-L372
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n", "def iterkeys(d, **kw):\n return d.iterkeys(**kw)\n", "def get_function_argspec(func, is_class_method=None):\n '''\n A small wrapper around getargspec that also supports callable classes\n :param is_class_method: Pass True if you are sure that the function being passed\n is a class method. The reason for this is that on Python 3\n ``inspect.ismethod`` only returns ``True`` for bound methods,\n while on Python 2, it returns ``True`` for bound and unbound\n methods. So, on Python 3, in case of a class method, you'd\n need the class to which the function belongs to be instantiated\n and this is not always wanted.\n '''\n if not callable(func):\n raise TypeError('{0} is not a callable'.format(func))\n\n if six.PY2:\n if is_class_method is True:\n aspec = inspect.getargspec(func)\n del aspec.args[0] # self\n elif inspect.isfunction(func):\n aspec = inspect.getargspec(func)\n elif inspect.ismethod(func):\n aspec = inspect.getargspec(func)\n del aspec.args[0] # self\n elif isinstance(func, object):\n aspec = inspect.getargspec(func.__call__)\n del aspec.args[0] # self\n else:\n raise TypeError(\n 'Cannot inspect argument list for \\'{0}\\''.format(func)\n )\n else:\n if is_class_method is True:\n aspec = _getargspec(func)\n del aspec.args[0] # self\n elif inspect.isfunction(func):\n aspec = _getargspec(func) # pylint: disable=redefined-variable-type\n elif inspect.ismethod(func):\n aspec = _getargspec(func)\n del aspec.args[0] # self\n elif isinstance(func, object):\n aspec = _getargspec(func.__call__)\n del aspec.args[0] # self\n else:\n raise TypeError(\n 'Cannot inspect argument list for \\'{0}\\''.format(func)\n )\n return aspec\n", "def parse_input(args, kwargs=None, condition=True, no_parse=None):\n '''\n Parse out the args and kwargs from a list of input values. 
Optionally,\n return the args and kwargs without passing them to condition_input().\n\n Don't pull args with key=val apart if it has a newline in it.\n '''\n if no_parse is None:\n no_parse = ()\n if kwargs is None:\n kwargs = {}\n _args = []\n _kwargs = {}\n for arg in args:\n if isinstance(arg, six.string_types):\n arg_name, arg_value = parse_kwarg(arg)\n if arg_name:\n _kwargs[arg_name] = yamlify_arg(arg_value) \\\n if arg_name not in no_parse \\\n else arg_value\n else:\n _args.append(yamlify_arg(arg))\n elif isinstance(arg, dict):\n # Yes, we're popping this key off and adding it back if\n # condition_input is called below, but this is the only way to\n # gracefully handle both CLI and API input.\n if arg.pop('__kwarg__', False) is True:\n _kwargs.update(arg)\n else:\n _args.append(arg)\n else:\n _args.append(arg)\n _kwargs.update(kwargs)\n if condition:\n return condition_input(_args, _kwargs)\n return _args, _kwargs\n", "def invalid_kwargs(invalid_kwargs, raise_exc=True):\n '''\n Raise a SaltInvocationError if invalid_kwargs is non-empty\n '''\n if invalid_kwargs:\n if isinstance(invalid_kwargs, dict):\n new_invalid = [\n '{0}={1}'.format(x, y)\n for x, y in six.iteritems(invalid_kwargs)\n ]\n invalid_kwargs = new_invalid\n msg = (\n 'The following keyword arguments are not valid: {0}'\n .format(', '.join(invalid_kwargs))\n )\n if raise_exc:\n raise SaltInvocationError(msg)\n else:\n return msg\n" ]
# -*- coding: utf-8 -*- ''' Routines to set up a minion ''' # Import python libs from __future__ import absolute_import, print_function, with_statement, unicode_literals import functools import os import sys import copy import time import types import signal import random import logging import threading import traceback import contextlib import multiprocessing from random import randint, shuffle from stat import S_IMODE import salt.serializers.msgpack from binascii import crc32 # Import Salt Libs # pylint: disable=import-error,no-name-in-module,redefined-builtin from salt.ext import six from salt._compat import ipaddress from salt.utils.network import parse_host_port from salt.ext.six.moves import range from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO import salt.transport.client import salt.defaults.exitcodes from salt.utils.ctx import RequestContext # pylint: enable=no-name-in-module,redefined-builtin import tornado HAS_PSUTIL = False try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True except ImportError: pass HAS_RESOURCE = False try: import resource HAS_RESOURCE = True except ImportError: pass try: import zmq.utils.monitor HAS_ZMQ_MONITOR = True except ImportError: HAS_ZMQ_MONITOR = False try: import salt.utils.win_functions HAS_WIN_FUNCTIONS = True except ImportError: HAS_WIN_FUNCTIONS = False # pylint: enable=import-error # Import salt libs import salt import salt.client import salt.crypt import salt.loader import salt.beacons import salt.engines import salt.payload import salt.pillar import salt.syspaths import salt.utils.args import salt.utils.context import salt.utils.data import salt.utils.error import salt.utils.event import salt.utils.files import salt.utils.jid import salt.utils.minion import salt.utils.minions import salt.utils.network import salt.utils.platform import salt.utils.process import salt.utils.schedule import salt.utils.ssdp import salt.utils.user import salt.utils.zeromq import 
salt.defaults.events import salt.defaults.exitcodes import salt.cli.daemons import salt.log.setup import salt.utils.dictupdate from salt.config import DEFAULT_MINION_OPTS from salt.defaults import DEFAULT_TARGET_DELIM from salt.utils.debug import enable_sigusr1_handler from salt.utils.event import tagify from salt.utils.odict import OrderedDict from salt.utils.process import (default_signals, SignalHandlingMultiprocessingProcess, ProcessManager) from salt.exceptions import ( CommandExecutionError, CommandNotFoundError, SaltInvocationError, SaltReqTimeoutError, SaltClientError, SaltSystemExit, SaltDaemonNotRunning, SaltException, SaltMasterUnresolvableError ) import tornado.gen # pylint: disable=F0401 import tornado.ioloop # pylint: disable=F0401 log = logging.getLogger(__name__) # To set up a minion: # 1. Read in the configuration # 2. Generate the function mapping dict # 3. Authenticate with the master # 4. Store the AES key # 5. Connect to the publisher # 6. Handle publications def resolve_dns(opts, fallback=True): ''' Resolves the master_ip and master_uri options ''' ret = {} check_dns = True if (opts.get('file_client', 'remote') == 'local' and not opts.get('use_master_when_local', False)): check_dns = False # Since salt.log is imported below, salt.utils.network needs to be imported here as well import salt.utils.network if check_dns is True: try: if opts['master'] == '': raise SaltSystemExit ret['master_ip'] = salt.utils.network.dns_check( opts['master'], int(opts['master_port']), True, opts['ipv6'], attempt_connect=False) except SaltClientError: retry_dns_count = opts.get('retry_dns_count', None) if opts['retry_dns']: while True: if retry_dns_count is not None: if retry_dns_count == 0: raise SaltMasterUnresolvableError retry_dns_count -= 1 import salt.log msg = ('Master hostname: \'{0}\' not found or not responsive. 
' 'Retrying in {1} seconds').format(opts['master'], opts['retry_dns']) if salt.log.setup.is_console_configured(): log.error(msg) else: print('WARNING: {0}'.format(msg)) time.sleep(opts['retry_dns']) try: ret['master_ip'] = salt.utils.network.dns_check( opts['master'], int(opts['master_port']), True, opts['ipv6'], attempt_connect=False) break except SaltClientError: pass else: if fallback: ret['master_ip'] = '127.0.0.1' else: raise except SaltSystemExit: unknown_str = 'unknown address' master = opts.get('master', unknown_str) if master == '': master = unknown_str if opts.get('__role') == 'syndic': err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \ 'Set \'syndic_master\' value in minion config.'.format(master) else: err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \ 'Set \'master\' value in minion config.'.format(master) log.error(err) raise SaltSystemExit(code=42, msg=err) else: ret['master_ip'] = '127.0.0.1' if 'master_ip' in ret and 'master_ip' in opts: if ret['master_ip'] != opts['master_ip']: log.warning( 'Master ip address changed from %s to %s', opts['master_ip'], ret['master_ip'] ) if opts['source_interface_name']: log.trace('Custom source interface required: %s', opts['source_interface_name']) interfaces = salt.utils.network.interfaces() log.trace('The following interfaces are available on this Minion:') log.trace(interfaces) if opts['source_interface_name'] in interfaces: if interfaces[opts['source_interface_name']]['up']: addrs = interfaces[opts['source_interface_name']]['inet'] if not opts['ipv6'] else\ interfaces[opts['source_interface_name']]['inet6'] ret['source_ip'] = addrs[0]['address'] log.debug('Using %s as source IP address', ret['source_ip']) else: log.warning('The interface %s is down so it cannot be used as source to connect to the Master', opts['source_interface_name']) else: log.warning('%s is not a valid interface. 
Ignoring.', opts['source_interface_name']) elif opts['source_address']: ret['source_ip'] = salt.utils.network.dns_check( opts['source_address'], int(opts['source_ret_port']), True, opts['ipv6'], attempt_connect=False) log.debug('Using %s as source IP address', ret['source_ip']) if opts['source_ret_port']: ret['source_ret_port'] = int(opts['source_ret_port']) log.debug('Using %d as source port for the ret server', ret['source_ret_port']) if opts['source_publish_port']: ret['source_publish_port'] = int(opts['source_publish_port']) log.debug('Using %d as source port for the master pub', ret['source_publish_port']) ret['master_uri'] = 'tcp://{ip}:{port}'.format( ip=ret['master_ip'], port=opts['master_port']) log.debug('Master URI: %s', ret['master_uri']) return ret def prep_ip_port(opts): ''' parse host:port values from opts['master'] and return valid: master: ip address or hostname as a string master_port: (optional) master returner port as integer e.g.: - master: 'localhost:1234' -> {'master': 'localhost', 'master_port': 1234} - master: '127.0.0.1:1234' -> {'master': '127.0.0.1', 'master_port' :1234} - master: '[::1]:1234' -> {'master': '::1', 'master_port': 1234} - master: 'fe80::a00:27ff:fedc:ba98' -> {'master': 'fe80::a00:27ff:fedc:ba98'} ''' ret = {} # Use given master IP if "ip_only" is set or if master_ip is an ipv6 address without # a port specified. The is_ipv6 check returns False if brackets are used in the IP # definition such as master: '[::1]:1234'. if opts['master_uri_format'] == 'ip_only': ret['master'] = ipaddress.ip_address(opts['master']) else: host, port = parse_host_port(opts['master']) ret = {'master': host} if port: ret.update({'master_port': port}) return ret def get_proc_dir(cachedir, **kwargs): ''' Given the cache directory, return the directory that process data is stored in, creating it if it doesn't exist. The following optional Keyword Arguments are handled: mode: which is anything os.makedir would accept as mode. 
uid: the uid to set, if not set, or it is None or -1 no changes are made. Same applies if the directory is already owned by this uid. Must be int. Works only on unix/unix like systems. gid: the gid to set, if not set, or it is None or -1 no changes are made. Same applies if the directory is already owned by this gid. Must be int. Works only on unix/unix like systems. ''' fn_ = os.path.join(cachedir, 'proc') mode = kwargs.pop('mode', None) if mode is None: mode = {} else: mode = {'mode': mode} if not os.path.isdir(fn_): # proc_dir is not present, create it with mode settings os.makedirs(fn_, **mode) d_stat = os.stat(fn_) # if mode is not an empty dict then we have an explicit # dir mode. So lets check if mode needs to be changed. if mode: mode_part = S_IMODE(d_stat.st_mode) if mode_part != mode['mode']: os.chmod(fn_, (d_stat.st_mode ^ mode_part) | mode['mode']) if hasattr(os, 'chown'): # only on unix/unix like systems uid = kwargs.pop('uid', -1) gid = kwargs.pop('gid', -1) # if uid and gid are both -1 then go ahead with # no changes at all if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \ [i for i in (uid, gid) if i != -1]: os.chown(fn_, uid, gid) return fn_ def eval_master_func(opts): ''' Evaluate master function if master type is 'func' and save it result in opts['master'] ''' if '__master_func_evaluated' not in opts: # split module and function and try loading the module mod_fun = opts['master'] mod, fun = mod_fun.split('.') try: master_mod = salt.loader.raw_mod(opts, mod, fun) if not master_mod: raise KeyError # we take whatever the module returns as master address opts['master'] = master_mod[mod_fun]() # Check for valid types if not isinstance(opts['master'], (six.string_types, list)): raise TypeError opts['__master_func_evaluated'] = True except KeyError: log.error('Failed to load module %s', mod_fun) sys.exit(salt.defaults.exitcodes.EX_GENERIC) except TypeError: log.error('%s returned from %s is not a string', opts['master'], mod_fun) 
sys.exit(salt.defaults.exitcodes.EX_GENERIC) log.info('Evaluated master from module: %s', mod_fun) def master_event(type, master=None): ''' Centralized master event function which will return event type based on event_map ''' event_map = {'connected': '__master_connected', 'disconnected': '__master_disconnected', 'failback': '__master_failback', 'alive': '__master_alive'} if type == 'alive' and master is not None: return '{0}_{1}'.format(event_map.get(type), master) return event_map.get(type, None) def service_name(): ''' Return the proper service name based on platform ''' return 'salt_minion' if 'bsd' in sys.platform else 'salt-minion' class MinionBase(object): def __init__(self, opts): self.opts = opts @staticmethod def process_schedule(minion, loop_interval): try: if hasattr(minion, 'schedule'): minion.schedule.eval() else: log.error('Minion scheduler not initialized. Scheduled jobs will not be run.') return # Check if scheduler requires lower loop interval than # the loop_interval setting if minion.schedule.loop_interval < loop_interval: loop_interval = minion.schedule.loop_interval log.debug( 'Overriding loop_interval because of scheduled jobs.' ) except Exception as exc: log.error('Exception %s occurred in scheduled job', exc) return loop_interval def process_beacons(self, functions): ''' Evaluate all of the configured beacons, grab the config again in case the pillar or grains changed ''' if 'config.merge' in functions: b_conf = functions['config.merge']('beacons', self.opts['beacons'], omit_opts=True) if b_conf: return self.beacons.process(b_conf, self.opts['grains']) # pylint: disable=no-member return [] @tornado.gen.coroutine def eval_master(self, opts, timeout=60, safe=True, failed=False, failback=False): ''' Evaluates and returns a tuple of the current master address and the pub_channel. In standard mode, just creates a pub_channel with the given master address. 
With master_type=func evaluates the current master address from the given module and then creates a pub_channel. With master_type=failover takes the list of masters and loops through them. The first one that allows the minion to create a pub_channel is then returned. If this function is called outside the minions initialization phase (for example from the minions main event-loop when a master connection loss was detected), 'failed' should be set to True. The current (possibly failed) master will then be removed from the list of masters. ''' # return early if we are not connecting to a master if opts['master_type'] == 'disable': log.warning('Master is set to disable, skipping connection') self.connected = False raise tornado.gen.Return((None, None)) # Run masters discovery over SSDP. This may modify the whole configuration, # depending of the networking and sets of masters. # if we are using multimaster, discovery can only happen at start time # because MinionManager handles it. by eval_master time the minion doesn't # know about other siblings currently running if isinstance(self.opts['discovery'], dict) and not self.opts['discovery'].get('multimaster'): self._discover_masters() # check if master_type was altered from its default if opts['master_type'] != 'str' and opts['__role'] != 'syndic': # check for a valid keyword if opts['master_type'] == 'func': eval_master_func(opts) # if failover or distributed is set, master has to be of type list elif opts['master_type'] in ('failover', 'distributed'): if isinstance(opts['master'], list): log.info( 'Got list of available master addresses: %s', opts['master'] ) if opts['master_type'] == 'distributed': master_len = len(opts['master']) if master_len > 1: secondary_masters = opts['master'][1:] master_idx = crc32(opts['id']) % master_len try: preferred_masters = opts['master'] preferred_masters[0] = opts['master'][master_idx] preferred_masters[1:] = [m for m in opts['master'] if m != preferred_masters[0]] opts['master'] = 
preferred_masters log.info('Distributed to the master at \'%s\'.', opts['master'][0]) except (KeyError, AttributeError, TypeError): log.warning('Failed to distribute to a specific master.') else: log.warning('master_type = distributed needs more than 1 master.') if opts['master_shuffle']: log.warning( 'Use of \'master_shuffle\' detected. \'master_shuffle\' is deprecated in favor ' 'of \'random_master\'. Please update your minion config file.' ) opts['random_master'] = opts['master_shuffle'] opts['auth_tries'] = 0 if opts['master_failback'] and opts['master_failback_interval'] == 0: opts['master_failback_interval'] = opts['master_alive_interval'] # if opts['master'] is a str and we have never created opts['master_list'] elif isinstance(opts['master'], six.string_types) and ('master_list' not in opts): # We have a string, but a list was what was intended. Convert. # See issue 23611 for details opts['master'] = [opts['master']] elif opts['__role'] == 'syndic': log.info('Syndic setting master_syndic to \'%s\'', opts['master']) # if failed=True, the minion was previously connected # we're probably called from the minions main-event-loop # because a master connection loss was detected. remove # the possibly failed master from the list of masters. 
elif failed: if failback: # failback list of masters to original config opts['master'] = opts['master_list'] else: log.info( 'Moving possibly failed master %s to the end of ' 'the list of masters', opts['master'] ) if opts['master'] in opts['local_masters']: # create new list of master with the possibly failed # one moved to the end failed_master = opts['master'] opts['master'] = [x for x in opts['local_masters'] if opts['master'] != x] opts['master'].append(failed_master) else: opts['master'] = opts['master_list'] else: msg = ('master_type set to \'failover\' but \'master\' ' 'is not of type list but of type ' '{0}'.format(type(opts['master']))) log.error(msg) sys.exit(salt.defaults.exitcodes.EX_GENERIC) # If failover is set, minion have to failover on DNS errors instead of retry DNS resolve. # See issue 21082 for details if opts['retry_dns'] and opts['master_type'] == 'failover': msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. ' 'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.') log.critical(msg) opts['retry_dns'] = 0 else: msg = ('Invalid keyword \'{0}\' for variable ' '\'master_type\''.format(opts['master_type'])) log.error(msg) sys.exit(salt.defaults.exitcodes.EX_GENERIC) # FIXME: if SMinion don't define io_loop, it can't switch master see #29088 # Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop # (The channel factories will set a default if the kwarg isn't passed) factory_kwargs = {'timeout': timeout, 'safe': safe} if getattr(self, 'io_loop', None): factory_kwargs['io_loop'] = self.io_loop # pylint: disable=no-member tries = opts.get('master_tries', 1) attempts = 0 # if we have a list of masters, loop through them and be # happy with the first one that allows us to connect if isinstance(opts['master'], list): conn = False last_exc = None opts['master_uri_list'] = [] opts['local_masters'] = copy.copy(opts['master']) # shuffle the masters and then loop through them if 
opts['random_master']: # master_failback is only used when master_type is set to failover if opts['master_type'] == 'failover' and opts['master_failback']: secondary_masters = opts['local_masters'][1:] shuffle(secondary_masters) opts['local_masters'][1:] = secondary_masters else: shuffle(opts['local_masters']) # This sits outside of the connection loop below because it needs to set # up a list of master URIs regardless of which masters are available # to connect _to_. This is primarily used for masterless mode, when # we need a list of master URIs to fire calls back to. for master in opts['local_masters']: opts['master'] = master opts.update(prep_ip_port(opts)) opts['master_uri_list'].append(resolve_dns(opts)['master_uri']) while True: if attempts != 0: # Give up a little time between connection attempts # to allow the IOLoop to run any other scheduled tasks. yield tornado.gen.sleep(opts['acceptance_wait_time']) attempts += 1 if tries > 0: log.debug( 'Connecting to master. Attempt %s of %s', attempts, tries ) else: log.debug( 'Connecting to master. Attempt %s (infinite attempts)', attempts ) for master in opts['local_masters']: opts['master'] = master opts.update(prep_ip_port(opts)) opts.update(resolve_dns(opts)) # on first run, update self.opts with the whole master list # to enable a minion to re-use old masters if they get fixed if 'master_list' not in opts: opts['master_list'] = copy.copy(opts['local_masters']) self.opts = opts pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs) try: yield pub_channel.connect() conn = True break except SaltClientError as exc: last_exc = exc if exc.strerror.startswith('Could not access'): msg = ( 'Failed to initiate connection with Master ' '%s: check ownership/permissions. 
Error ' 'message: %s', opts['master'], exc ) else: msg = ('Master %s could not be reached, trying next ' 'next master (if any)', opts['master']) log.info(msg) continue if not conn: if attempts == tries: # Exhausted all attempts. Return exception. self.connected = False self.opts['master'] = copy.copy(self.opts['local_masters']) log.error( 'No master could be reached or all masters ' 'denied the minion\'s connection attempt.' ) # If the code reaches this point, 'last_exc' # should already be set. raise last_exc # pylint: disable=E0702 else: self.tok = pub_channel.auth.gen_token(b'salt') self.connected = True raise tornado.gen.Return((opts['master'], pub_channel)) # single master sign in else: if opts['random_master']: log.warning('random_master is True but there is only one master specified. Ignoring.') while True: if attempts != 0: # Give up a little time between connection attempts # to allow the IOLoop to run any other scheduled tasks. yield tornado.gen.sleep(opts['acceptance_wait_time']) attempts += 1 if tries > 0: log.debug( 'Connecting to master. Attempt %s of %s', attempts, tries ) else: log.debug( 'Connecting to master. Attempt %s (infinite attempts)', attempts ) opts.update(prep_ip_port(opts)) opts.update(resolve_dns(opts)) try: if self.opts['transport'] == 'detect': self.opts['detect_mode'] = True for trans in ('zeromq', 'tcp'): if trans == 'zeromq' and not zmq: continue self.opts['transport'] = trans pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs) yield pub_channel.connect() if not pub_channel.auth.authenticated: continue del self.opts['detect_mode'] break else: pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs) yield pub_channel.connect() self.tok = pub_channel.auth.gen_token(b'salt') self.connected = True raise tornado.gen.Return((opts['master'], pub_channel)) except SaltClientError as exc: if attempts == tries: # Exhausted all attempts. Return exception. 
                    self.connected = False
                    raise exc

    def _discover_masters(self):
        '''
        Discover master(s) and decide where to connect, if SSDP is around.

        This modifies the configuration on the fly (``self.opts['master']``)
        and therefore has no return value in the "not found" cases.

        :return: the discovered master address (single-master case) or None
        '''
        # Only attempt discovery when the master is still the stock default
        # and discovery has not been explicitly disabled.
        if self.opts['master'] == DEFAULT_MINION_OPTS['master'] and self.opts['discovery'] is not False:
            master_discovery_client = salt.utils.ssdp.SSDPDiscoveryClient()
            masters = {}
            # Retry discovery a configurable number of times, pausing between
            # empty results; any exception aborts the retry loop.
            for att in range(self.opts['discovery'].get('attempts', 3)):
                try:
                    att += 1
                    log.info('Attempting %s time(s) to discover masters', att)
                    masters.update(master_discovery_client.discover())
                    if not masters:
                        time.sleep(self.opts['discovery'].get('pause', 5))
                    else:
                        break
                except Exception as err:
                    log.error('SSDP discovery failure: %s', err)
                    break

            if masters:
                policy = self.opts.get('discovery', {}).get('match', 'any')
                if policy not in ['any', 'all']:
                    log.error('SSDP configuration matcher failure: unknown value "%s". '
                              'Should be "any" or "all"', policy)
                    return
                mapping = self.opts['discovery'].get('mapping', {})
                discovered = []
                for addr, mappings in masters.items():
                    for proto_data in mappings:
                        # Count how many configured mapping key/value pairs the
                        # advertised master matches.
                        cnt = len([key for key, value in mapping.items()
                                   if proto_data.get('mapping', {}).get(key) == value])
                        if policy == 'any' and bool(cnt) or cnt == len(mapping):
                            if self.opts['discovery'].get('multimaster'):
                                discovered.append(proto_data['master'])
                            else:
                                # Single-master mode: first match wins.
                                self.opts['master'] = proto_data['master']
                                return
                self.opts['master'] = discovered

    def _return_retry_timer(self):
        '''
        Based on the minion configuration, either return a randomized timer or
        just return the value of the return_retry_timer.
        '''
        msg = 'Minion return retry timer set to %s seconds'
        if self.opts.get('return_retry_timer_max'):
            try:
                random_retry = randint(self.opts['return_retry_timer'],
                                       self.opts['return_retry_timer_max'])
                retry_msg = msg % random_retry
                log.debug('%s (randomized)', msg % random_retry)
                return random_retry
            except ValueError:
                # Catch wiseguys using negative integers here
                log.error(
                    'Invalid value (return_retry_timer: %s or '
                    'return_retry_timer_max: %s). Both must be positive '
                    'integers.',
                    self.opts['return_retry_timer'],
                    self.opts['return_retry_timer_max'],
                )
                log.debug(msg, DEFAULT_MINION_OPTS['return_retry_timer'])
                return DEFAULT_MINION_OPTS['return_retry_timer']
        else:
            log.debug(msg, self.opts.get('return_retry_timer'))
            return self.opts.get('return_retry_timer')


class SMinion(MinionBase):
    '''
    Create an object that has loaded all of the minion module functions,
    grains, modules, returners etc.  The SMinion allows developers to
    generate all of the salt minion functions and present them with these
    functions for general use.
    '''
    def __init__(self, opts):
        # Late setup of the opts grains, so we can log from the grains module
        import salt.loader
        opts['grains'] = salt.loader.grains(opts)
        super(SMinion, self).__init__(opts)

        # run ssdp discovery if necessary
        self._discover_masters()

        # Clean out the proc directory (default /var/cache/salt/minion/proc)
        if (self.opts.get('file_client', 'remote') == 'remote'
                or self.opts.get('use_master_when_local', False)):
            install_zmq()
            io_loop = ZMQDefaultLoop.current()
            # Synchronously resolve/connect to a master before loading modules.
            io_loop.run_sync(
                lambda: self.eval_master(self.opts, failed=True)
            )
        self.gen_modules(initial_load=True)

        # If configured, cache pillar data on the minion
        if self.opts['file_client'] == 'remote' and self.opts.get('minion_pillar_cache', False):
            import salt.utils.yaml
            pdir = os.path.join(self.opts['cachedir'], 'pillar')
            if not os.path.isdir(pdir):
                os.makedirs(pdir, 0o700)
            ptop = os.path.join(pdir, 'top.sls')
            if self.opts['saltenv'] is not None:
                penv = self.opts['saltenv']
            else:
                penv = 'base'
            cache_top = {penv: {self.opts['id']: ['cache']}}
            with salt.utils.files.fopen(ptop, 'wb') as fp_:
                salt.utils.yaml.safe_dump(cache_top, fp_)
            os.chmod(ptop, 0o600)
            cache_sls = os.path.join(pdir, 'cache.sls')
            with salt.utils.files.fopen(cache_sls, 'wb') as fp_:
                salt.utils.yaml.safe_dump(self.opts['pillar'], fp_)
            os.chmod(cache_sls, 0o600)

    def gen_modules(self, initial_load=False):
        '''
        Tell the minion to reload the execution modules

        CLI Example:
.. code-block:: bash salt '*' sys.reload_modules ''' self.opts['pillar'] = salt.pillar.get_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ).compile_pillar() self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils) self.serializers = salt.loader.serializers(self.opts) self.returners = salt.loader.returners(self.opts, self.functions) self.proxy = salt.loader.proxy(self.opts, self.functions, self.returners, None) # TODO: remove self.function_errors = {} # Keep the funcs clean self.states = salt.loader.states(self.opts, self.functions, self.utils, self.serializers) self.rend = salt.loader.render(self.opts, self.functions) # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.functions['sys.reload_modules'] = self.gen_modules self.executors = salt.loader.executors(self.opts) class MasterMinion(object): ''' Create a fully loaded minion function object for generic use on the master. What makes this class different is that the pillar is omitted, otherwise everything else is loaded cleanly. ''' def __init__( self, opts, returners=True, states=True, rend=True, matcher=True, whitelist=None, ignore_config_errors=True): self.opts = salt.config.minion_config( opts['conf_file'], ignore_config_errors=ignore_config_errors, role='master' ) self.opts.update(opts) self.whitelist = whitelist self.opts['grains'] = salt.loader.grains(opts) self.opts['pillar'] = {} self.mk_returners = returners self.mk_states = states self.mk_rend = rend self.mk_matcher = matcher self.gen_modules(initial_load=True) def gen_modules(self, initial_load=False): ''' Tell the minion to reload the execution modules CLI Example: .. 
code-block:: bash salt '*' sys.reload_modules ''' self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods( self.opts, utils=self.utils, whitelist=self.whitelist, initial_load=initial_load) self.serializers = salt.loader.serializers(self.opts) if self.mk_returners: self.returners = salt.loader.returners(self.opts, self.functions) if self.mk_states: self.states = salt.loader.states(self.opts, self.functions, self.utils, self.serializers) if self.mk_rend: self.rend = salt.loader.render(self.opts, self.functions) if self.mk_matcher: self.matchers = salt.loader.matchers(self.opts) self.functions['sys.reload_modules'] = self.gen_modules class MinionManager(MinionBase): ''' Create a multi minion interface, this creates as many minions as are defined in the master option and binds each minion object to a respective master. ''' def __init__(self, opts): super(MinionManager, self).__init__(opts) self.auth_wait = self.opts['acceptance_wait_time'] self.max_auth_wait = self.opts['acceptance_wait_time_max'] self.minions = [] self.jid_queue = [] install_zmq() self.io_loop = ZMQDefaultLoop.current() self.process_manager = ProcessManager(name='MultiMinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # Tornado backward compat def __del__(self): self.destroy() def _bind(self): # start up the event publisher, so we can see events during startup self.event_publisher = salt.utils.event.AsyncEventPublisher( self.opts, io_loop=self.io_loop, ) self.event = salt.utils.event.get_event('minion', opts=self.opts, io_loop=self.io_loop) self.event.subscribe('') self.event.set_event_handler(self.handle_event) @tornado.gen.coroutine def handle_event(self, package): yield [minion.handle_event(package) for minion in self.minions] def _create_minion_object(self, opts, timeout, safe, io_loop=None, loaded_base_name=None, jid_queue=None): ''' Helper function to return the correct type of object ''' return Minion(opts, timeout, 
safe, io_loop=io_loop, loaded_base_name=loaded_base_name, jid_queue=jid_queue) def _check_minions(self): ''' Check the size of self.minions and raise an error if it's empty ''' if not self.minions: err = ('Minion unable to successfully connect to ' 'a Salt Master.') log.error(err) def _spawn_minions(self, timeout=60): ''' Spawn all the coroutines which will sign in to masters ''' # Run masters discovery over SSDP. This may modify the whole configuration, # depending of the networking and sets of masters. If match is 'any' we let # eval_master handle the discovery instead so disconnections can also handle # discovery if isinstance(self.opts['discovery'], dict) and self.opts['discovery'].get('multimaster'): self._discover_masters() masters = self.opts['master'] if (self.opts['master_type'] in ('failover', 'distributed')) or not isinstance(self.opts['master'], list): masters = [masters] for master in masters: s_opts = copy.deepcopy(self.opts) s_opts['master'] = master s_opts['multimaster'] = True minion = self._create_minion_object(s_opts, s_opts['auth_timeout'], False, io_loop=self.io_loop, loaded_base_name='salt.loader.{0}'.format(s_opts['master']), jid_queue=self.jid_queue) self.io_loop.spawn_callback(self._connect_minion, minion) self.io_loop.call_later(timeout, self._check_minions) @tornado.gen.coroutine def _connect_minion(self, minion): ''' Create a minion, and asynchronously connect it to a master ''' auth_wait = minion.opts['acceptance_wait_time'] failed = False while True: if failed: if auth_wait < self.max_auth_wait: auth_wait += self.auth_wait log.debug( "sleeping before reconnect attempt to %s [%d/%d]", minion.opts['master'], auth_wait, self.max_auth_wait, ) yield tornado.gen.sleep(auth_wait) # TODO: log? 
try: if minion.opts.get('beacons_before_connect', False): minion.setup_beacons(before_connect=True) if minion.opts.get('scheduler_before_connect', False): minion.setup_scheduler(before_connect=True) yield minion.connect_master(failed=failed) minion.tune_in(start=False) self.minions.append(minion) break except SaltClientError as exc: failed = True log.error( 'Error while bringing up minion for multi-master. Is ' 'master at %s responding?', minion.opts['master'] ) except SaltMasterUnresolvableError: err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \ 'Set \'master\' value in minion config.'.format(minion.opts['master']) log.error(err) break except Exception as e: failed = True log.critical( 'Unexpected error while connecting to %s', minion.opts['master'], exc_info=True ) # Multi Master Tune In def tune_in(self): ''' Bind to the masters This loop will attempt to create connections to masters it hasn't connected to yet, but once the initial connection is made it is up to ZMQ to do the reconnect (don't know of an API to get the state here in salt) ''' self._bind() # Fire off all the minion coroutines self._spawn_minions() # serve forever! 
self.io_loop.start() @property def restart(self): for minion in self.minions: if minion.restart: return True return False def stop(self, signum): for minion in self.minions: minion.process_manager.stop_restarting() minion.process_manager.send_signal_to_processes(signum) # kill any remaining processes minion.process_manager.kill_children() minion.destroy() def destroy(self): for minion in self.minions: minion.destroy() class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. 
We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, 
timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. 
''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ 
master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = 
salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) executors = salt.loader.executors(opts, functions, proxy=proxy) if opt_in: self.opts = opts return functions, returners, errors, executors def _send_req_sync(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.ReqChannel.factory(self.opts) try: return channel.send(load, timeout=timeout) finally: channel.close() @tornado.gen.coroutine def _send_req_async(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.AsyncReqChannel.factory(self.opts) try: ret = yield channel.send(load, timeout=timeout) raise tornado.gen.Return(ret) finally: channel.close() def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. 
''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return if sync: try: self._send_req_sync(load, timeout) except salt.exceptions.SaltReqTimeoutError: log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return False except Exception: log.info('fire_master failed: %s', traceback.format_exc()) return False else: if timeout_handler is None: def handle_timeout(*_): log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return True timeout_handler = handle_timeout with tornado.stack_context.ExceptionStackContext(timeout_handler): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # Ensure payload is unicode. Disregard failure to decode binary blobs. 
if six.PY2: data = salt.utils.data.decode(data, keep=True) if 'user' in data: log.info( 'User %s Executing command %s with jid %s', data['user'], data['fun'], data['jid'] ) else: log.info( 'Executing command %s with jid %s', data['fun'], data['jid'] ) log.debug('Command details %s', data) # Don't duplicate jobs log.trace('Started JIDs: %s', self.jid_queue) if self.jid_queue is not None: if data['jid'] in self.jid_queue: return else: self.jid_queue.append(data['jid']) if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']: self.jid_queue.pop(0) if isinstance(data['fun'], six.string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners process_count_max = self.opts.get('process_count_max') process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs') if process_count_max > 0: process_count = len(salt.utils.minion.running(self.opts)) while process_count >= process_count_max: log.warning('Maximum number of processes (%s) reached while ' 'executing jid %s, waiting %s seconds...', process_count_max, data['jid'], process_count_max_sleep_secs) yield tornado.gen.sleep(process_count_max_sleep_secs) process_count = len(salt.utils.minion.running(self.opts)) # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. 
instance = self multiprocessing_enabled = self.opts.get('multiprocessing', True) if multiprocessing_enabled: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None with default_signals(signal.SIGINT, signal.SIGTERM): process = SignalHandlingMultiprocessingProcess( target=self._target, args=(instance, self.opts, data, self.connected) ) else: process = threading.Thread( target=self._target, args=(instance, self.opts, data, self.connected), name=data['jid'] ) if multiprocessing_enabled: with default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers process.start() else: process.start() # TODO: remove the windows specific check? if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not 
hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or 
allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
                    try:
                        func_result = all(return_data.get(x, True)
                                          for x in ('result', 'success'))
                    except Exception:
                        # return data is not a dict
                        func_result = True
                    if not func_result:
                        retcode = salt.defaults.exitcodes.EX_GENERIC

                ret['retcode'] = retcode
                ret['success'] = retcode == salt.defaults.exitcodes.EX_OK
            except CommandNotFoundError as exc:
                msg = 'Command required for \'{0}\' not found'.format(
                    function_name
                )
                log.debug(msg, exc_info=True)
                ret['return'] = '{0}: {1}'.format(msg, exc)
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            except CommandExecutionError as exc:
                log.error(
                    'A command in \'%s\' had a problem: %s',
                    function_name, exc,
                    exc_info_on_loglevel=logging.DEBUG
                )
                ret['return'] = 'ERROR: {0}'.format(exc)
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            except SaltInvocationError as exc:
                log.error(
                    'Problem executing \'%s\': %s',
                    function_name, exc,
                    exc_info_on_loglevel=logging.DEBUG
                )
                ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
                    function_name, exc
                )
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            except TypeError as exc:
                # Typically bad/missing arguments; include the function's
                # docstring to help the caller fix the invocation
                msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(
                    function_name, exc, func.__doc__ or ''
                )
                log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
                ret['return'] = msg
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            except Exception:
                # Catch-all: report the traceback back to the master instead
                # of letting the job process die silently
                msg = 'The minion function caused an exception'
                log.warning(msg, exc_info_on_loglevel=True)
                salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
                ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        else:
            # Function is not available: return the closest matching docs (if
            # any) or an explanatory "missing function" message
            docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name))
            if docs:
                docs[function_name] = minion_instance.functions.missing_fun_string(function_name)
                ret['return'] = docs
            else:
                ret['return'] = minion_instance.functions.missing_fun_string(function_name)
                mod_name = function_name.split('.')[0]
                if mod_name in minion_instance.function_errors:
                    ret['return'] += ' Possible reasons: \'{0}\''.format(
                        minion_instance.function_errors[mod_name]
                    )
            ret['success'] = False
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            ret['out'] = 'nested'

        ret['jid'] = data['jid']
        ret['fun'] = data['fun']
        ret['fun_args'] = data['arg']
        if 'master_id' in data:
            ret['master_id'] = data['master_id']
        if 'metadata' in data:
            if isinstance(data['metadata'], dict):
                ret['metadata'] = data['metadata']
            else:
                log.warning('The metadata parameter must be a dictionary. Ignoring.')
        if minion_instance.connected:
            minion_instance._return_pub(
                ret,
                timeout=minion_instance._return_retry_timer()
            )

        # Add default returners from minion config
        # Should have been converted to comma-delimited string already
        if isinstance(opts.get('return'), six.string_types):
            if data['ret']:
                data['ret'] = ','.join((data['ret'], opts['return']))
            else:
                data['ret'] = opts['return']

        log.debug('minion return: %s', ret)
        # TODO: make a list? Seems odd to split it this late :/
        if data['ret'] and isinstance(data['ret'], six.string_types):
            if 'ret_config' in data:
                ret['ret_config'] = data['ret_config']
            if 'ret_kwargs' in data:
                ret['ret_kwargs'] = data['ret_kwargs']
            ret['id'] = opts['id']
            for returner in set(data['ret'].split(',')):
                try:
                    returner_str = '{0}.returner'.format(returner)
                    if returner_str in minion_instance.returners:
                        minion_instance.returners[returner_str](ret)
                    else:
                        returner_err = minion_instance.returners.missing_fun_string(returner_str)
                        log.error(
                            'Returner %s could not be loaded: %s',
                            returner_str, returner_err
                        )
                except Exception as exc:
                    # One failing returner must not prevent the others
                    log.exception(
                        'The return failed for job %s: %s', data['jid'], exc
                    )

    @classmethod
    def _thread_multi_return(cls, minion_instance, opts, data):
        '''
        This method should be used as a threading target, start the actual
        minion side execution.
        '''
        fn_ = os.path.join(minion_instance.proc_dir, data['jid'])

        if opts['multiprocessing'] and not salt.utils.platform.is_windows():
            # Shutdown the multiprocessing before daemonizing
            salt.log.setup.shutdown_multiprocessing_logging()

            salt.utils.process.daemonize_if(opts)

            # Reconfigure multiprocessing logging after daemonizing
            salt.log.setup.setup_multiprocessing_logging()

        salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))

        # Record the job in the proc dir so running jobs can be enumerated
        sdata = {'pid': os.getpid()}
        sdata.update(data)
        log.info('Starting a new job with PID %s', sdata['pid'])
        with salt.utils.files.fopen(fn_, 'w+b') as fp_:
            fp_.write(minion_instance.serial.dumps(sdata))

        # multifunc_ordered=True keys results by position, otherwise by
        # function name
        multifunc_ordered = opts.get('multifunc_ordered', False)
        num_funcs = len(data['fun'])
        if multifunc_ordered:
            ret = {
                'return': [None] * num_funcs,
                'retcode': [None] * num_funcs,
                'success': [False] * num_funcs
            }
        else:
            ret = {
                'return': {},
                'retcode': {},
                'success': {}
            }

        for ind in range(0, num_funcs):
            if not multifunc_ordered:
                ret['success'][data['fun'][ind]] = False
            try:
                minion_blackout_violation = False
                if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
                    whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
                    # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                    if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
                        minion_blackout_violation = True
                # NOTE(review): this uses elif (grains only checked when the
                # pillar flag is unset) while _thread_return checks both
                # independently — confirm which behavior is intended
                elif minion_instance.opts['grains'].get('minion_blackout', False):
                    whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
                    if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
                        minion_blackout_violation = True
                if minion_blackout_violation:
                    raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                              'to False in pillar or grains to resume operations. Only '
                                              'saltutil.refresh_pillar allowed in blackout mode.')

                func = minion_instance.functions[data['fun'][ind]]

                args, kwargs = load_args_and_kwargs(
                    func,
                    data['arg'][ind],
                    data)
                minion_instance.functions.pack['__context__']['retcode'] = 0
                key = ind if multifunc_ordered else data['fun'][ind]
                ret['return'][key] = func(*args, **kwargs)
                retcode = minion_instance.functions.pack['__context__'].get(
                    'retcode',
                    0
                )
                if retcode == 0:
                    # No nonzero retcode in __context__ dunder. Check if return
                    # is a dictionary with a "result" or "success" key.
                    try:
                        func_result = all(ret['return'][key].get(x, True)
                                          for x in ('result', 'success'))
                    except Exception:
                        # return data is not a dict
                        func_result = True
                    if not func_result:
                        retcode = 1

                ret['retcode'][key] = retcode
                ret['success'][key] = retcode == 0
            except Exception as exc:
                # Record the traceback for this function and keep going with
                # the remaining functions
                trb = traceback.format_exc()
                log.warning('The minion function caused an exception: %s', exc)
                if multifunc_ordered:
                    ret['return'][ind] = trb
                else:
                    ret['return'][data['fun'][ind]] = trb
        ret['jid'] = data['jid']
        ret['fun'] = data['fun']
        ret['fun_args'] = data['arg']
        if 'metadata' in data:
            ret['metadata'] = data['metadata']
        if minion_instance.connected:
            minion_instance._return_pub(
                ret,
                timeout=minion_instance._return_retry_timer()
            )
        if data['ret']:
            if 'ret_config' in data:
                ret['ret_config'] = data['ret_config']
            if 'ret_kwargs' in data:
                ret['ret_kwargs'] = data['ret_kwargs']
            for returner in set(data['ret'].split(',')):
                ret['id'] = opts['id']
                try:
                    minion_instance.returners['{0}.returner'.format(
                        returner
                    )](ret)
                except Exception as exc:
                    log.error(
                        'The return failed for job %s: %s',
                        data['jid'], exc
                    )

    def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True):
        '''
        Return the data from the executed command to the master server
        '''
        jid = ret.get('jid', ret.get('__jid__'))
        fun = ret.get('fun', ret.get('__fun__'))
        if self.opts['multiprocessing']:
            # Remove the proc-dir entry for this job; it is finished
            fn_ = os.path.join(self.proc_dir, jid)
            if os.path.isfile(fn_):
                try:
                    os.remove(fn_)
                except (OSError, IOError):
                    # The file is gone already
                    pass
        log.info('Returning information for job: %s', jid)
        log.trace('Return data: %s', ret)
        if ret_cmd == '_syndic_return':
            # NOTE(review): 'id' is taken from opts['uid'] here, unlike the
            # opts['id'] used below — confirm this is intentional
            load = {'cmd': ret_cmd,
                    'id': self.opts['uid'],
                    'jid': jid,
                    'fun': fun,
                    'arg': ret.get('arg'),
                    'tgt': ret.get('tgt'),
                    'tgt_type': ret.get('tgt_type'),
                    'load': ret.get('__load__')}
            if '__master_id__' in ret:
                load['master_id'] = ret['__master_id__']
            load['return'] = {}
            for key, value in six.iteritems(ret):
                # Keys starting with '__' are internal metadata, not results
                if key.startswith('__'):
                    continue
                load['return'][key] = value
        else:
            load = {'cmd': ret_cmd,
                    'id': self.opts['id']}
            for key, value in six.iteritems(ret):
                load[key] = value

        if 'out' in ret:
            if isinstance(ret['out'], six.string_types):
                load['out'] = ret['out']
            else:
                log.error(
                    'Invalid outputter %s. This is likely a bug.',
                    ret['out']
                )
        else:
            # Fall back to the outputter advertised by the executed function
            try:
                oput = self.functions[fun].__outputter__
            except (KeyError, AttributeError, TypeError):
                pass
            else:
                if isinstance(oput, six.string_types):
                    load['out'] = oput
        if self.opts['cache_jobs']:
            # Local job cache has been enabled
            if ret['jid'] == 'req':
                ret['jid'] = salt.utils.jid.gen_jid(self.opts)
            salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret)

        if not self.opts['pub_ret']:
            return ''

        def timeout_handler(*_):
            log.warning(
               'The minion failed to return the job information for job %s. '
               'This is often due to the master being shut down or '
               'overloaded. If the master is running, consider increasing '
               'the worker_threads value.', jid
            )
            return True

        if sync:
            try:
                ret_val = self._send_req_sync(load, timeout=timeout)
            except SaltReqTimeoutError:
                timeout_handler()
                return ''
        else:
            with tornado.stack_context.ExceptionStackContext(timeout_handler):
                ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg

        log.trace('ret_val = %s', ret_val)  # pylint: disable=no-member
        return ret_val

    def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True):
        '''
        Return the data from the executed command to the master server
        '''
        if not isinstance(rets, list):
            rets = [rets]
        jids = {}
        for ret in rets:
            jid = ret.get('jid', ret.get('__jid__'))
            fun = ret.get('fun', ret.get('__fun__'))
            if self.opts['multiprocessing']:
                # Remove the proc-dir entry for this job; it is finished
                fn_ = os.path.join(self.proc_dir, jid)
                if os.path.isfile(fn_):
                    try:
                        os.remove(fn_)
                    except (OSError, IOError):
                        # The file is gone already
                        pass
            log.info('Returning information for job: %s', jid)
            # Aggregate all returns for the same jid into one load
            load = jids.setdefault(jid, {})
            if ret_cmd == '_syndic_return':
                if not load:
                    load.update({'id': self.opts['id'],
                                 'jid': jid,
                                 'fun': fun,
                                 'arg': ret.get('arg'),
                                 'tgt': ret.get('tgt'),
                                 'tgt_type': ret.get('tgt_type'),
                                 'load': ret.get('__load__'),
                                 'return': {}})
                if '__master_id__' in ret:
                    load['master_id'] = ret['__master_id__']
                for key, value in six.iteritems(ret):
                    if key.startswith('__'):
                        continue
                    load['return'][key] = value
            else:
                load.update({'id': self.opts['id']})
                for key, value in six.iteritems(ret):
                    load[key] = value
            if 'out' in ret:
                if isinstance(ret['out'], six.string_types):
                    load['out'] = ret['out']
                else:
                    log.error(
                        'Invalid outputter %s. This is likely a bug.',
                        ret['out']
                    )
            else:
                # Fall back to the outputter advertised by the executed function
                try:
                    oput = self.functions[fun].__outputter__
                except (KeyError, AttributeError, TypeError):
                    pass
                else:
                    if isinstance(oput, six.string_types):
                        load['out'] = oput
            if self.opts['cache_jobs']:
                # Local job cache has been enabled
                salt.utils.minion.cache_jobs(self.opts, load['jid'], ret)

        load = {'cmd': ret_cmd,
                'load': list(six.itervalues(jids))}

        def timeout_handler(*_):
            log.warning(
               'The minion failed to return the job information for job %s. '
               'This is often due to the master being shut down or '
               'overloaded. If the master is running, consider increasing '
               'the worker_threads value.', jid
            )
            return True

        if sync:
            try:
                ret_val = self._send_req_sync(load, timeout=timeout)
            except SaltReqTimeoutError:
                timeout_handler()
                return ''
        else:
            with tornado.stack_context.ExceptionStackContext(timeout_handler):
                ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg

        log.trace('ret_val = %s', ret_val)  # pylint: disable=no-member
        return ret_val

    def _state_run(self):
        '''
        Execute a state run based on information set in the minion config file
        '''
        if self.opts['startup_states']:
            if self.opts.get('master_type', 'str') == 'disable' and \
                    self.opts.get('file_client', 'remote') == 'remote':
                log.warning(
                    'Cannot run startup_states when \'master_type\' is set '
                    'to \'disable\' and \'file_client\' is set to '
                    '\'remote\'. Skipping.'
) else: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data) def _refresh_grains_watcher(self, refresh_interval_in_minutes): ''' Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: :return: None ''' if '__update_grains' not in self.opts.get('schedule', {}): if 'schedule' not in self.opts: self.opts['schedule'] = {} self.opts['schedule'].update({ '__update_grains': { 'function': 'event.fire', 'args': [{}, 'grains_refresh'], 'minutes': refresh_interval_in_minutes } }) def _fire_master_minion_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to False in Sodium release. self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'minion_start' ) # send name spaced event self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'minion'), ) def module_refresh(self, force_refresh=False, notify=False): ''' Refresh the functions and returners. ''' log.debug('Refreshing modules. Notify=%s', notify) self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify) self.schedule.functions = self.functions self.schedule.returners = self.returners def beacons_refresh(self): ''' Refresh the functions and returners. 
''' log.debug('Refreshing beacons.') self.beacons = salt.beacons.Beacon(self.opts, self.functions) def matchers_refresh(self): ''' Refresh the matchers ''' log.debug('Refreshing matchers.') self.matchers = salt.loader.matchers(self.opts) # TODO: only allow one future in flight at a time? @tornado.gen.coroutine def pillar_refresh(self, force_refresh=False, notify=False): ''' Refresh the pillar ''' if self.connected: log.debug('Refreshing pillar. Notify: %s', notify) async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ) try: self.opts['pillar'] = yield async_pillar.compile_pillar() if notify: evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE) except SaltClientError: # Do not exit if a pillar refresh fails. log.error('Pillar data could not be refreshed. ' 'One or more masters may be down!') finally: async_pillar.destroy() self.module_refresh(force_refresh, notify) self.matchers_refresh() self.beacons_refresh() def manage_schedule(self, tag, data): ''' Refresh the functions and returners. 
        '''
        func = data.get('func', None)
        name = data.get('name', None)
        schedule = data.get('schedule', None)
        where = data.get('where', None)
        persist = data.get('persist', None)

        # Map the requested operation to the Schedule method and its args
        funcs = {'delete': ('delete_job', (name, persist)),
                 'add': ('add_job', (schedule, persist)),
                 'modify': ('modify_job', (name, schedule, persist)),
                 'enable': ('enable_schedule', ()),
                 'disable': ('disable_schedule', ()),
                 'enable_job': ('enable_job', (name, persist)),
                 'disable_job': ('disable_job', (name, persist)),
                 'postpone_job': ('postpone_job', (name, data)),
                 'skip_job': ('skip_job', (name, data)),
                 'reload': ('reload', (schedule,)),
                 'list': ('list', (where,)),
                 'save_schedule': ('save_schedule', ()),
                 'get_next_fire_time': ('get_next_fire_time', (name,))}

        # Call the appropriate schedule function
        try:
            # funcs.get(func) is None for unknown ops, making the unpack
            # raise TypeError, which is logged below
            alias, params = funcs.get(func)
            getattr(self.schedule, alias)(*params)
        except TypeError:
            log.error('Function "%s" is unavailable in salt.utils.scheduler', func)

    def manage_beacons(self, tag, data):
        '''
        Manage Beacons
        '''
        func = data.get('func', None)
        name = data.get('name', None)
        beacon_data = data.get('beacon_data', None)
        include_pillar = data.get('include_pillar', None)
        include_opts = data.get('include_opts', None)

        # Map the requested operation to the Beacon method and its args
        funcs = {'add': ('add_beacon', (name, beacon_data)),
                 'modify': ('modify_beacon', (name, beacon_data)),
                 'delete': ('delete_beacon', (name,)),
                 'enable': ('enable_beacons', ()),
                 'disable': ('disable_beacons', ()),
                 'enable_beacon': ('enable_beacon', (name,)),
                 'disable_beacon': ('disable_beacon', (name,)),
                 'list': ('list_beacons', (include_opts, include_pillar)),
                 'list_available': ('list_available_beacons', ()),
                 'validate_beacon': ('validate_beacon', (name, beacon_data)),
                 'reset': ('reset', ())}

        # Call the appropriate beacon function
        try:
            alias, params = funcs.get(func)
            getattr(self.beacons, alias)(*params)
        except AttributeError:
            log.error('Function "%s" is unavailable in salt.beacons', func)
        except TypeError as exc:
            log.info(
                'Failed to handle %s with data(%s). Error: %s',
                tag, data, exc,
                exc_info_on_loglevel=logging.DEBUG
            )

    def environ_setenv(self, tag, data):
        '''
        Set the salt-minion main process environment according to
        the data contained in the minion event data
        '''
        environ = data.get('environ', None)
        if environ is None:
            return False
        false_unsets = data.get('false_unsets', False)
        clear_all = data.get('clear_all', False)
        # Imported lazily to avoid a module-level import cycle
        import salt.modules.environ as mod_environ
        return mod_environ.setenv(environ, false_unsets, clear_all)

    def _pre_tune(self):
        '''
        Set the minion running flag and issue the appropriate warnings if
        the minion cannot be started or is already running
        '''
        if self._running is None:
            self._running = True
        elif self._running is False:
            log.error(
                'This %s was scheduled to stop. Not running %s.tune_in()',
                self.__class__.__name__, self.__class__.__name__
            )
            return
        elif self._running is True:
            log.error(
                'This %s is already running. Not running %s.tune_in()',
                self.__class__.__name__, self.__class__.__name__
            )
            return

        try:
            log.info(
                '%s is starting as user \'%s\'',
                self.__class__.__name__, salt.utils.user.get_user()
            )
        except Exception as err:
            # Only windows is allowed to fail here. See #3189. Log as debug in
            # that case. Else, error.
log.log( salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR, 'Failed to get the user who is starting %s', self.__class__.__name__, exc_info=err ) def _mine_send(self, tag, data): ''' Send mine data to the master ''' channel = salt.transport.client.ReqChannel.factory(self.opts) data['tok'] = self.tok try: ret = channel.send(data) return ret except SaltReqTimeoutError: log.warning('Unable to send mine data to master.') return None finally: channel.close() def _handle_tag_module_refresh(self, tag, data): ''' Handle a module_refresh event ''' self.module_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) @tornado.gen.coroutine def _handle_tag_pillar_refresh(self, tag, data): ''' Handle a pillar_refresh event ''' yield self.pillar_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) def _handle_tag_beacons_refresh(self, tag, data): ''' Handle a beacon_refresh event ''' self.beacons_refresh() def _handle_tag_matchers_refresh(self, tag, data): ''' Handle a matchers_refresh event ''' self.matchers_refresh() def _handle_tag_manage_schedule(self, tag, data): ''' Handle a manage_schedule event ''' self.manage_schedule(tag, data) def _handle_tag_manage_beacons(self, tag, data): ''' Handle a manage_beacons event ''' self.manage_beacons(tag, data) def _handle_tag_grains_refresh(self, tag, data): ''' Handle a grains_refresh event ''' if (data.get('force_refresh', False) or self.grains_cache != self.opts['grains']): self.pillar_refresh(force_refresh=True) self.grains_cache = self.opts['grains'] def _handle_tag_environ_setenv(self, tag, data): ''' Handle a environ_setenv event ''' self.environ_setenv(tag, data) def _handle_tag_minion_mine(self, tag, data): ''' Handle a _minion_mine event ''' self._mine_send(tag, data) def _handle_tag_fire_master(self, tag, data): ''' Handle a fire_master event ''' if self.connected: log.debug('Forwarding master event tag=%s', data['tag']) 
            self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])

    def _handle_tag_master_disconnected_failback(self, tag, data):
        '''
        Handle a master_disconnected_failback event

        NOTE(review): this method contains ``yield`` (eval_master) but no
        coroutine decorator is visible here — confirm how it is driven by
        the event dispatch.
        '''
        # if the master disconnect event is for a different master, raise an exception
        if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']:
            # not mine master, ignore
            return
        if tag.startswith(master_event(type='failback')):
            # if the master failback event is not for the top master, raise an exception
            if data['master'] != self.opts['master_list'][0]:
                raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format(
                    data['master'], self.opts['master']))
            # if the master failback event is for the current master, raise an exception
            elif data['master'] == self.opts['master'][0]:
                raise SaltException('Already connected to \'{0}\''.format(data['master']))

        if self.connected:
            # we are not connected anymore
            self.connected = False
            log.info('Connection to master %s lost', self.opts['master'])

            # we can't use the config default here because the default '0' value is overloaded
            # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up
            # these jobs
            master_alive_interval = self.opts['master_alive_interval'] or 60

            if self.opts['master_type'] != 'failover':
                # modify the scheduled job to fire on reconnect
                if self.opts['transport'] != 'tcp':
                    schedule = {
                        'function': 'status.master',
                        'seconds': master_alive_interval,
                        'jid_include': True,
                        'maxrunning': 1,
                        'return_job': False,
                        'kwargs': {'master': self.opts['master'],
                                   'connected': False}
                    }
                    self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
                                             schedule=schedule)
            else:
                # delete the scheduled job to don't interfere with the failover process
                if self.opts['transport'] != 'tcp':
                    self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True)

                log.info('Trying to tune in to next master from master-list')

                if hasattr(self, 'pub_channel'):
                    # Tear down the old publish channel before failing over
                    self.pub_channel.on_recv(None)
                    if hasattr(self.pub_channel, 'auth'):
                        self.pub_channel.auth.invalidate()
                    if hasattr(self.pub_channel, 'close'):
                        self.pub_channel.close()
                    del self.pub_channel

                # if eval_master finds a new master for us, self.connected
                # will be True again on successful master authentication
                try:
                    master, self.pub_channel = yield self.eval_master(
                        opts=self.opts,
                        failed=True,
                        failback=tag.startswith(master_event(type='failback')))
                except SaltClientError:
                    pass

                if self.connected:
                    self.opts['master'] = master

                    # re-init the subsystems to work with the new master
                    log.info(
                        'Re-initialising subsystems for new master %s',
                        self.opts['master']
                    )

                    # put the current schedule into the new loaders
                    self.opts['schedule'] = self.schedule.option('schedule')
                    self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
                    # make the schedule to use the new 'functions' loader
                    self.schedule.functions = self.functions
                    self.pub_channel.on_recv(self._handle_payload)
                    self._fire_master_minion_start()
                    log.info('Minion is ready to receive requests!')

                    # update scheduled job to run with the new master addr
                    if self.opts['transport'] != 'tcp':
                        schedule = {
                            'function': 'status.master',
                            'seconds': master_alive_interval,
                            'jid_include': True,
                            'maxrunning': 1,
                            'return_job': False,
                            'kwargs': {'master': self.opts['master'],
                                       'connected': True}
                        }
                        self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
                                                 schedule=schedule)

                        if self.opts['master_failback'] and 'master_list' in self.opts:
                            if self.opts['master'] != self.opts['master_list'][0]:
                                # Not on the top master: keep pinging it so we
                                # can fail back when it returns
                                schedule = {
                                    'function': 'status.ping_master',
                                    'seconds': self.opts['master_failback_interval'],
                                    'jid_include': True,
                                    'maxrunning': 1,
                                    'return_job': False,
                                    'kwargs': {'master': self.opts['master_list'][0]}
                                }
                                self.schedule.modify_job(name=master_event(type='failback'),
                                                         schedule=schedule)
                            else:
                                self.schedule.delete_job(name=master_event(type='failback'), persist=True)
                else:
                    # No master could be reached: restart the minion
                    self.restart = True
                    self.io_loop.stop()

    def _handle_tag_master_connected(self, tag, data):
        '''
        Handle a master_connected event
        '''
        # handle this event only once. otherwise it will pollute the log
        # also if master type is failover all the reconnection work is done
        # by `disconnected` event handler and this event must never happen,
        # anyway check it to be sure
        if not self.connected and self.opts['master_type'] != 'failover':
            log.info('Connection to master %s re-established', self.opts['master'])
            self.connected = True
            # modify the __master_alive job to only fire,
            # if the connection is lost again
            if self.opts['transport'] != 'tcp':
                if self.opts['master_alive_interval'] > 0:
                    schedule = {
                        'function': 'status.master',
                        'seconds': self.opts['master_alive_interval'],
                        'jid_include': True,
                        'maxrunning': 1,
                        'return_job': False,
                        'kwargs': {'master': self.opts['master'],
                                   'connected': True}
                    }
                    self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
                                             schedule=schedule)
                else:
                    self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True)

    def _handle_tag_schedule_return(self, tag, data):
        '''
        Handle a _schedule_return event
        '''
        # reporting current connection with master
        if data['schedule'].startswith(master_event(type='alive', master='')):
            if data['return']:
                log.debug(
                    'Connected to master %s',
                    data['schedule'].split(master_event(type='alive', master=''))[1]
                )
        self._return_pub(data, ret_cmd='_return', sync=False)

    def _handle_tag_salt_error(self, tag, data):
        '''
        Handle a _salt_error event
        '''
        if self.connected:
            log.debug('Forwarding salt error event tag=%s', tag)
            self._fire_master(data, tag)

    def _handle_tag_salt_auth_creds(self, tag, data):
        '''
        Handle a salt_auth_creds event
        '''
        key = tuple(data['key'])
        log.debug(
            'Updating auth data for %s: %s -> %s',
            key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds']
        )
        salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds']

    @tornado.gen.coroutine
    def handle_event(self, package):
''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. ''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy() class Syndic(Minion): ''' Make a Syndic minion, this minion will use the minion keys on the master to authenticate with a higher level master. ''' def __init__(self, opts, **kwargs): self._syndic_interface = opts.get('interface') self._syndic = True # force auth_safemode True because Syndic don't support autorestart opts['auth_safemode'] = True opts['loop_interval'] = 1 super(Syndic, self).__init__(opts, **kwargs) self.mminion = salt.minion.MasterMinion(opts) self.jid_forward_cache = set() self.jids = {} self.raw_events = [] self.pub_future = None def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # TODO: even do this?? 
data['to'] = int(data.get('to', self.opts['timeout'])) - 1 # Only forward the command if it didn't originate from ourselves if data.get('master_id', 0) != self.opts.get('master_id', 1): self.syndic_cmd(data) def syndic_cmd(self, data): ''' Take the now clear load and forward it on to the client cmd ''' # Set up default tgt_type if 'tgt_type' not in data: data['tgt_type'] = 'glob' kwargs = {} # optionally add a few fields to the publish data for field in ('master_id', # which master the job came from 'user', # which user ran the job ): if field in data: kwargs[field] = data[field] def timeout_handler(*args): log.warning('Unable to forward pub data: %s', args[1]) return True with tornado.stack_context.ExceptionStackContext(timeout_handler): self.local.pub_async(data['tgt'], data['fun'], data['arg'], data['tgt_type'], data['ret'], data['jid'], data['to'], io_loop=self.io_loop, callback=lambda _: None, **kwargs) def fire_master_syndic_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to false in Sodium release. 
self._fire_master( 'Syndic {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'syndic_start', sync=False, ) self._fire_master( 'Syndic {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'syndic'), sync=False, ) # TODO: clean up docs def tune_in_no_block(self): ''' Executes the tune_in sequence but omits extra logging and the management of the event bus assuming that these are handled outside the tune_in sequence ''' # Instantiate the local client self.local = salt.client.get_local_client( self.opts['_minion_conf_file'], io_loop=self.io_loop) # add handler to subscriber self.pub_channel.on_recv(self._process_cmd_socket) def _process_cmd_socket(self, payload): if payload is not None and payload['enc'] == 'aes': log.trace('Handling payload') self._handle_decoded_payload(payload['load']) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the syndic currently has no need. @tornado.gen.coroutine def reconnect(self): if hasattr(self, 'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication master, self.pub_channel = yield self.eval_master(opts=self.opts) if self.connected: self.opts['master'] = master self.pub_channel.on_recv(self._process_cmd_socket) log.info('Minion is ready to receive requests!') raise tornado.gen.Return(self) def destroy(self): ''' Tear down the syndic minion ''' # We borrowed the local clients poller so give it back before # it's destroyed. Reset the local poller reference. 
super(Syndic, self).destroy() if hasattr(self, 'local'): del self.local if hasattr(self, 'forward_events'): self.forward_events.stop() # TODO: need a way of knowing if the syndic connection is busted class SyndicManager(MinionBase): ''' Make a MultiMaster syndic minion, this minion will handle relaying jobs and returns from all minions connected to it to the list of masters it is connected to. Modes (controlled by `syndic_mode`: sync: This mode will synchronize all events and publishes from higher level masters cluster: This mode will only sync job publishes and returns Note: jobs will be returned best-effort to the requesting master. This also means (since we are using zmq) that if a job was fired and the master disconnects between the publish and return, that the return will end up in a zmq buffer in this Syndic headed to that original master. In addition, since these classes all seem to use a mix of blocking and non-blocking calls (with varying timeouts along the way) this daemon does not handle failure well, it will (under most circumstances) stall the daemon for ~15s trying to forward events to the down master ''' # time to connect to upstream master SYNDIC_CONNECT_TIMEOUT = 5 SYNDIC_EVENT_TIMEOUT = 5 def __init__(self, opts, io_loop=None): opts['loop_interval'] = 1 super(SyndicManager, self).__init__(opts) self.mminion = salt.minion.MasterMinion(opts) # sync (old behavior), cluster (only returns and publishes) self.syndic_mode = self.opts.get('syndic_mode', 'sync') self.syndic_failover = self.opts.get('syndic_failover', 'random') self.auth_wait = self.opts['acceptance_wait_time'] self.max_auth_wait = self.opts['acceptance_wait_time_max'] self._has_master = threading.Event() self.jid_forward_cache = set() if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # List of events self.raw_events = [] # Dict of rets: {master_id: {event_tag: job_ret, ...}, ...} self.job_rets = {} # List of delayed job_rets which was 
unable to send for some reason and will be resend to # any available master self.delayed = [] # Active pub futures: {master_id: (future, [job_ret, ...]), ...} self.pub_futures = {} def _spawn_syndics(self): ''' Spawn all the coroutines which will sign in the syndics ''' self._syndics = OrderedDict() # mapping of opts['master'] -> syndic masters = self.opts['master'] if not isinstance(masters, list): masters = [masters] for master in masters: s_opts = copy.copy(self.opts) s_opts['master'] = master self._syndics[master] = self._connect_syndic(s_opts) @tornado.gen.coroutine def _connect_syndic(self, opts): ''' Create a syndic, and asynchronously connect it to a master ''' auth_wait = opts['acceptance_wait_time'] failed = False while True: if failed: if auth_wait < self.max_auth_wait: auth_wait += self.auth_wait log.debug( "sleeping before reconnect attempt to %s [%d/%d]", opts['master'], auth_wait, self.max_auth_wait, ) yield tornado.gen.sleep(auth_wait) # TODO: log? log.debug( 'Syndic attempting to connect to %s', opts['master'] ) try: syndic = Syndic(opts, timeout=self.SYNDIC_CONNECT_TIMEOUT, safe=False, io_loop=self.io_loop, ) yield syndic.connect_master(failed=failed) # set up the syndic to handle publishes (specifically not event forwarding) syndic.tune_in_no_block() # Send an event to the master that the minion is live syndic.fire_master_syndic_start() log.info( 'Syndic successfully connected to %s', opts['master'] ) break except SaltClientError as exc: failed = True log.error( 'Error while bringing up syndic for multi-syndic. Is the ' 'master at %s responding?', opts['master'] ) except (KeyboardInterrupt, SystemExit): raise except Exception: failed = True log.critical( 'Unexpected error while connecting to %s', opts['master'], exc_info=True ) raise tornado.gen.Return(syndic) def _mark_master_dead(self, master): ''' Mark a master as dead. 
This will start the sign-in routine ''' # if its connected, mark it dead if self._syndics[master].done(): syndic = self._syndics[master].result() # pylint: disable=no-member self._syndics[master] = syndic.reconnect() else: # TODO: debug? log.info( 'Attempting to mark %s as dead, although it is already ' 'marked dead', master ) def _call_syndic(self, func, args=(), kwargs=None, master_id=None): ''' Wrapper to call a given func on a syndic, best effort to get the one you asked for ''' if kwargs is None: kwargs = {} successful = False # Call for each master for master, syndic_future in self.iter_master_options(master_id): if not syndic_future.done() or syndic_future.exception(): log.error( 'Unable to call %s on %s, that syndic is not connected', func, master ) continue try: getattr(syndic_future.result(), func)(*args, **kwargs) successful = True except SaltClientError: log.error( 'Unable to call %s on %s, trying another...', func, master ) self._mark_master_dead(master) if not successful: log.critical('Unable to call %s on any masters!', func) def _return_pub_syndic(self, values, master_id=None): ''' Wrapper to call the '_return_pub_multi' a syndic, best effort to get the one you asked for ''' func = '_return_pub_multi' for master, syndic_future in self.iter_master_options(master_id): if not syndic_future.done() or syndic_future.exception(): log.error( 'Unable to call %s on %s, that syndic is not connected', func, master ) continue future, data = self.pub_futures.get(master, (None, None)) if future is not None: if not future.done(): if master == master_id: # Targeted master previous send not done yet, call again later return False else: # Fallback master is busy, try the next one continue elif future.exception(): # Previous execution on this master returned an error log.error( 'Unable to call %s on %s, trying another...', func, master ) self._mark_master_dead(master) del self.pub_futures[master] # Add not sent data to the delayed list and try the next master 
self.delayed.extend(data) continue future = getattr(syndic_future.result(), func)(values, '_syndic_return', timeout=self._return_retry_timer(), sync=False) self.pub_futures[master] = (future, values) return True # Loop done and didn't exit: wasn't sent, try again later return False def iter_master_options(self, master_id=None): ''' Iterate (in order) over your options for master ''' masters = list(self._syndics.keys()) if self.opts['syndic_failover'] == 'random': shuffle(masters) if master_id not in self._syndics: master_id = masters.pop(0) else: masters.remove(master_id) while True: yield master_id, self._syndics[master_id] if not masters: break master_id = masters.pop(0) def _reset_event_aggregation(self): self.job_rets = {} self.raw_events = [] def reconnect_event_bus(self, something): future = self.local.event.set_event_handler(self._process_event) self.io_loop.add_future(future, self.reconnect_event_bus) # Syndic Tune In def tune_in(self): ''' Lock onto the publisher. This is the main event loop for the syndic ''' self._spawn_syndics() # Instantiate the local client self.local = salt.client.get_local_client( self.opts['_minion_conf_file'], io_loop=self.io_loop) self.local.event.subscribe('') log.debug('SyndicManager \'%s\' trying to tune in', self.opts['id']) # register the event sub to the poller self.job_rets = {} self.raw_events = [] self._reset_event_aggregation() future = self.local.event.set_event_handler(self._process_event) self.io_loop.add_future(future, self.reconnect_event_bus) # forward events every syndic_event_forward_timeout self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events, self.opts['syndic_event_forward_timeout'] * 1000, ) self.forward_events.start() # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() self.io_loop.start() def _process_event(self, raw): # TODO: cleanup: Move down into event class mtag, data = self.local.event.unpack(raw, self.local.event.serial) log.trace('Got event %s', mtag) # pylint: 
disable=no-member tag_parts = mtag.split('/') if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \ salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \ 'return' in data: if 'jid' not in data: # Not a job return return if self.syndic_mode == 'cluster' and data.get('master_id', 0) == self.opts.get('master_id', 1): log.debug('Return received with matching master_id, not forwarding') return master = data.get('master_id') jdict = self.job_rets.setdefault(master, {}).setdefault(mtag, {}) if not jdict: jdict['__fun__'] = data.get('fun') jdict['__jid__'] = data['jid'] jdict['__load__'] = {} fstr = '{0}.get_load'.format(self.opts['master_job_cache']) # Only need to forward each load once. Don't hit the disk # for every minion return! if data['jid'] not in self.jid_forward_cache: jdict['__load__'].update( self.mminion.returners[fstr](data['jid']) ) self.jid_forward_cache.add(data['jid']) if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']: # Pop the oldest jid from the cache tmp = sorted(list(self.jid_forward_cache)) tmp.pop(0) self.jid_forward_cache = set(tmp) if master is not None: # __'s to make sure it doesn't print out on the master cli jdict['__master_id__'] = master ret = {} for key in 'return', 'retcode', 'success': if key in data: ret[key] = data[key] jdict[data['id']] = ret else: # TODO: config to forward these? 
If so we'll have to keep track of who # has seen them # if we are the top level masters-- don't forward all the minion events if self.syndic_mode == 'sync': # Add generic event aggregation here if 'retcode' not in data: self.raw_events.append({'data': data, 'tag': mtag}) def _forward_events(self): log.trace('Forwarding events') # pylint: disable=no-member if self.raw_events: events = self.raw_events self.raw_events = [] self._call_syndic('_fire_master', kwargs={'events': events, 'pretag': tagify(self.opts['id'], base='syndic'), 'timeout': self._return_retry_timer(), 'sync': False, }, ) if self.delayed: res = self._return_pub_syndic(self.delayed) if res: self.delayed = [] for master in list(six.iterkeys(self.job_rets)): values = list(six.itervalues(self.job_rets[master])) res = self._return_pub_syndic(values, master_id=master) if res: del self.job_rets[master] class ProxyMinionManager(MinionManager): ''' Create the multi-minion interface but for proxy minions ''' def _create_minion_object(self, opts, timeout, safe, io_loop=None, loaded_base_name=None, jid_queue=None): ''' Helper function to return the correct type of object ''' return ProxyMinion(opts, timeout, safe, io_loop=io_loop, loaded_base_name=loaded_base_name, jid_queue=jid_queue) def _metaproxy_call(opts, fn_name): metaproxy = salt.loader.metaproxy(opts) try: metaproxy_name = opts['metaproxy'] except KeyError: metaproxy_name = 'proxy' log.trace( 'No metaproxy key found in opts for id %s. ' 'Defaulting to standard proxy minion.', opts['id'] ) metaproxy_fn = metaproxy_name + '.' + fn_name return metaproxy[metaproxy_fn] class ProxyMinion(Minion): ''' This class instantiates a 'proxy' minion--a minion that does not manipulate the host it runs on, but instead manipulates a device that cannot run a minion. ''' # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. 
(since they need to know which master they connected to) If this function is changed, please check Minion._post_master_init to see if those changes need to be propagated. ProxyMinions need a significantly different post master setup, which is why the differences are not factored out into separate helper functions. ''' mp_call = _metaproxy_call(self.opts, 'post_master_init') return mp_call(self, master) def _target_load(self, load): ''' Verify that the publication is valid and applies to this minion ''' mp_call = _metaproxy_call(self.opts, 'target_load') return mp_call(self, load) def _handle_payload(self, payload): mp_call = _metaproxy_call(self.opts, 'handle_payload') return mp_call(self, payload) @tornado.gen.coroutine def _handle_decoded_payload(self, data): mp_call = _metaproxy_call(self.opts, 'handle_decoded_payload') return mp_call(self, data) @classmethod def _target(cls, minion_instance, opts, data, connected): mp_call = _metaproxy_call(opts, 'target') return mp_call(cls, minion_instance, opts, data, connected) @classmethod def _thread_return(cls, minion_instance, opts, data): mp_call = _metaproxy_call(opts, 'thread_return') return mp_call(cls, minion_instance, opts, data) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): mp_call = _metaproxy_call(opts, 'thread_multi_return') return mp_call(cls, minion_instance, opts, data) class SProxyMinion(SMinion): ''' Create an object that has loaded all of the minion module functions, grains, modules, returners etc. The SProxyMinion allows developers to generate all of the salt minion functions and present them with these functions for general use. ''' def gen_modules(self, initial_load=False): ''' Tell the minion to reload the execution modules CLI Example: .. 
code-block:: bash salt '*' sys.reload_modules ''' self.opts['grains'] = salt.loader.grains(self.opts) self.opts['pillar'] = salt.pillar.get_pillar( self.opts, self.opts['grains'], self.opts['id'], saltenv=self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ).compile_pillar() if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts: errmsg = ( 'No "proxy" configuration key found in pillar or opts ' 'dictionaries for id {id}. Check your pillar/options ' 'configuration and contents. Salt-proxy aborted.' ).format(id=self.opts['id']) log.error(errmsg) self._running = False raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg) if 'proxy' not in self.opts: self.opts['proxy'] = self.opts['pillar']['proxy'] # Then load the proxy module self.proxy = salt.loader.proxy(self.opts) self.utils = salt.loader.utils(self.opts, proxy=self.proxy) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=False, proxy=self.proxy) self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy) self.matchers = salt.loader.matchers(self.opts) self.functions['sys.reload_modules'] = self.gen_modules self.executors = salt.loader.executors(self.opts, self.functions, proxy=self.proxy) fq_proxyname = self.opts['proxy']['proxytype'] # we can then sync any proxymodules down from the master # we do a sync_all here in case proxy code was installed by # SPM or was manually placed in /srv/salt/_modules etc. 
self.functions['saltutil.sync_all'](saltenv=self.opts['saltenv']) self.functions.pack['__proxy__'] = self.proxy self.proxy.pack['__salt__'] = self.functions self.proxy.pack['__ret__'] = self.returners self.proxy.pack['__pillar__'] = self.opts['pillar'] # Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__ self.utils = salt.loader.utils(self.opts, proxy=self.proxy) self.proxy.pack['__utils__'] = self.utils # Reload all modules so all dunder variables are injected self.proxy.reload_modules() if ('{0}.init'.format(fq_proxyname) not in self.proxy or '{0}.shutdown'.format(fq_proxyname) not in self.proxy): errmsg = 'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname) + \ 'Check your proxymodule. Salt-proxy aborted.' log.error(errmsg) self._running = False raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg) self.module_executors = self.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])() proxy_init_fn = self.proxy[fq_proxyname + '.init'] proxy_init_fn(self.opts) self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy) # Sync the grains here so the proxy can communicate them to the master self.functions['saltutil.sync_grains'](saltenv='base') self.grains_cache = self.opts['grains'] self.ready = True
saltstack/salt
salt/minion.py
eval_master_func
python
def eval_master_func(opts):
    '''
    Evaluate the master address by calling a module function when
    ``master_type`` is ``func`` and save its result in ``opts['master']``.

    ``opts['master']`` must be a ``'<module>.<function>'`` string; the named
    function is loaded via ``salt.loader.raw_mod`` and whatever it returns
    (a string or a list) becomes the new master address. The flag
    ``__master_func_evaluated`` is set so the evaluation only happens once.

    Exits the process with ``EX_GENERIC`` if the master option is malformed,
    the module cannot be loaded, or the function returns an invalid type.
    '''
    if '__master_func_evaluated' in opts:
        # Already evaluated on a previous pass; nothing to do.
        return
    # split module and function and try loading the module
    mod_fun = opts['master']
    # A missing '.' means the option is not '<module>.<function>' at all;
    # bail out cleanly instead of letting str.split raise ValueError.
    mod, _, fun = mod_fun.partition('.')
    if not fun:
        log.error('Failed to load module %s', mod_fun)
        sys.exit(salt.defaults.exitcodes.EX_GENERIC)
    try:
        master_mod = salt.loader.raw_mod(opts, mod, fun)
        if not master_mod:
            raise KeyError
        # we take whatever the module returns as master address
        opts['master'] = master_mod[mod_fun]()
        # Check for valid types: a single address or a list of addresses
        if not isinstance(opts['master'], (six.string_types, list)):
            raise TypeError
        opts['__master_func_evaluated'] = True
    except KeyError:
        log.error('Failed to load module %s', mod_fun)
        sys.exit(salt.defaults.exitcodes.EX_GENERIC)
    except TypeError:
        log.error('%s returned from %s is not a string or list', opts['master'], mod_fun)
        sys.exit(salt.defaults.exitcodes.EX_GENERIC)
    log.info('Evaluated master from module: %s', mod_fun)
Evaluate master function if master type is 'func' and save its result in opts['master']
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L375-L400
null
# -*- coding: utf-8 -*- ''' Routines to set up a minion ''' # Import python libs from __future__ import absolute_import, print_function, with_statement, unicode_literals import functools import os import sys import copy import time import types import signal import random import logging import threading import traceback import contextlib import multiprocessing from random import randint, shuffle from stat import S_IMODE import salt.serializers.msgpack from binascii import crc32 # Import Salt Libs # pylint: disable=import-error,no-name-in-module,redefined-builtin from salt.ext import six from salt._compat import ipaddress from salt.utils.network import parse_host_port from salt.ext.six.moves import range from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO import salt.transport.client import salt.defaults.exitcodes from salt.utils.ctx import RequestContext # pylint: enable=no-name-in-module,redefined-builtin import tornado HAS_PSUTIL = False try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True except ImportError: pass HAS_RESOURCE = False try: import resource HAS_RESOURCE = True except ImportError: pass try: import zmq.utils.monitor HAS_ZMQ_MONITOR = True except ImportError: HAS_ZMQ_MONITOR = False try: import salt.utils.win_functions HAS_WIN_FUNCTIONS = True except ImportError: HAS_WIN_FUNCTIONS = False # pylint: enable=import-error # Import salt libs import salt import salt.client import salt.crypt import salt.loader import salt.beacons import salt.engines import salt.payload import salt.pillar import salt.syspaths import salt.utils.args import salt.utils.context import salt.utils.data import salt.utils.error import salt.utils.event import salt.utils.files import salt.utils.jid import salt.utils.minion import salt.utils.minions import salt.utils.network import salt.utils.platform import salt.utils.process import salt.utils.schedule import salt.utils.ssdp import salt.utils.user import salt.utils.zeromq import 
salt.defaults.events import salt.defaults.exitcodes import salt.cli.daemons import salt.log.setup import salt.utils.dictupdate from salt.config import DEFAULT_MINION_OPTS from salt.defaults import DEFAULT_TARGET_DELIM from salt.utils.debug import enable_sigusr1_handler from salt.utils.event import tagify from salt.utils.odict import OrderedDict from salt.utils.process import (default_signals, SignalHandlingMultiprocessingProcess, ProcessManager) from salt.exceptions import ( CommandExecutionError, CommandNotFoundError, SaltInvocationError, SaltReqTimeoutError, SaltClientError, SaltSystemExit, SaltDaemonNotRunning, SaltException, SaltMasterUnresolvableError ) import tornado.gen # pylint: disable=F0401 import tornado.ioloop # pylint: disable=F0401 log = logging.getLogger(__name__) # To set up a minion: # 1. Read in the configuration # 2. Generate the function mapping dict # 3. Authenticate with the master # 4. Store the AES key # 5. Connect to the publisher # 6. Handle publications def resolve_dns(opts, fallback=True): ''' Resolves the master_ip and master_uri options ''' ret = {} check_dns = True if (opts.get('file_client', 'remote') == 'local' and not opts.get('use_master_when_local', False)): check_dns = False # Since salt.log is imported below, salt.utils.network needs to be imported here as well import salt.utils.network if check_dns is True: try: if opts['master'] == '': raise SaltSystemExit ret['master_ip'] = salt.utils.network.dns_check( opts['master'], int(opts['master_port']), True, opts['ipv6'], attempt_connect=False) except SaltClientError: retry_dns_count = opts.get('retry_dns_count', None) if opts['retry_dns']: while True: if retry_dns_count is not None: if retry_dns_count == 0: raise SaltMasterUnresolvableError retry_dns_count -= 1 import salt.log msg = ('Master hostname: \'{0}\' not found or not responsive. 
' 'Retrying in {1} seconds').format(opts['master'], opts['retry_dns']) if salt.log.setup.is_console_configured(): log.error(msg) else: print('WARNING: {0}'.format(msg)) time.sleep(opts['retry_dns']) try: ret['master_ip'] = salt.utils.network.dns_check( opts['master'], int(opts['master_port']), True, opts['ipv6'], attempt_connect=False) break except SaltClientError: pass else: if fallback: ret['master_ip'] = '127.0.0.1' else: raise except SaltSystemExit: unknown_str = 'unknown address' master = opts.get('master', unknown_str) if master == '': master = unknown_str if opts.get('__role') == 'syndic': err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \ 'Set \'syndic_master\' value in minion config.'.format(master) else: err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \ 'Set \'master\' value in minion config.'.format(master) log.error(err) raise SaltSystemExit(code=42, msg=err) else: ret['master_ip'] = '127.0.0.1' if 'master_ip' in ret and 'master_ip' in opts: if ret['master_ip'] != opts['master_ip']: log.warning( 'Master ip address changed from %s to %s', opts['master_ip'], ret['master_ip'] ) if opts['source_interface_name']: log.trace('Custom source interface required: %s', opts['source_interface_name']) interfaces = salt.utils.network.interfaces() log.trace('The following interfaces are available on this Minion:') log.trace(interfaces) if opts['source_interface_name'] in interfaces: if interfaces[opts['source_interface_name']]['up']: addrs = interfaces[opts['source_interface_name']]['inet'] if not opts['ipv6'] else\ interfaces[opts['source_interface_name']]['inet6'] ret['source_ip'] = addrs[0]['address'] log.debug('Using %s as source IP address', ret['source_ip']) else: log.warning('The interface %s is down so it cannot be used as source to connect to the Master', opts['source_interface_name']) else: log.warning('%s is not a valid interface. 
Ignoring.', opts['source_interface_name'])
    elif opts['source_address']:
        ret['source_ip'] = salt.utils.network.dns_check(
            opts['source_address'],
            int(opts['source_ret_port']),
            True,
            opts['ipv6'],
            attempt_connect=False)
        log.debug('Using %s as source IP address', ret['source_ip'])
    if opts['source_ret_port']:
        ret['source_ret_port'] = int(opts['source_ret_port'])
        log.debug('Using %d as source port for the ret server', ret['source_ret_port'])
    if opts['source_publish_port']:
        ret['source_publish_port'] = int(opts['source_publish_port'])
        log.debug('Using %d as source port for the master pub', ret['source_publish_port'])
    ret['master_uri'] = 'tcp://{ip}:{port}'.format(
        ip=ret['master_ip'], port=opts['master_port'])
    log.debug('Master URI: %s', ret['master_uri'])

    return ret


def prep_ip_port(opts):
    '''
    Parse host:port values from opts['master'] and return a dict with valid:
        master: ip address or hostname as a string
        master_port: (optional) master returner port as integer

    e.g.:
      - master: 'localhost:1234' -> {'master': 'localhost', 'master_port': 1234}
      - master: '127.0.0.1:1234' -> {'master': '127.0.0.1', 'master_port' :1234}
      - master: '[::1]:1234' -> {'master': '::1', 'master_port': 1234}
      - master: 'fe80::a00:27ff:fedc:ba98' -> {'master': 'fe80::a00:27ff:fedc:ba98'}
    '''
    ret = {}
    # Use given master IP if "ip_only" is set or if master_ip is an ipv6 address without
    # a port specified. The is_ipv6 check returns False if brackets are used in the IP
    # definition such as master: '[::1]:1234'.
    if opts['master_uri_format'] == 'ip_only':
        # NOTE: this stores an ipaddress.IPv4Address/IPv6Address object, not a
        # string, and raises ValueError if opts['master'] is not a literal IP.
        ret['master'] = ipaddress.ip_address(opts['master'])
    else:
        host, port = parse_host_port(opts['master'])
        ret = {'master': host}
        if port:
            # 'master_port' is only present in the result when an explicit
            # port was given; callers fall back to opts['master_port'] otherwise.
            ret.update({'master_port': port})

    return ret


def get_proc_dir(cachedir, **kwargs):
    '''
    Given the cache directory, return the directory that process data is
    stored in, creating it if it doesn't exist.

    The following optional Keyword Arguments are handled:

    mode: which is anything os.makedir would accept as mode.

    uid: the uid to set, if not set, or it is None or -1 no changes are
         made. Same applies if the directory is already owned by this
         uid. Must be int. Works only on unix/unix like systems.

    gid: the gid to set, if not set, or it is None or -1 no changes are
         made. Same applies if the directory is already owned by this
         gid. Must be int. Works only on unix/unix like systems.
    '''
    fn_ = os.path.join(cachedir, 'proc')
    mode = kwargs.pop('mode', None)

    # Normalize 'mode' into a kwargs dict so it can be splatted into
    # os.makedirs below and truth-tested for the chmod step.
    if mode is None:
        mode = {}
    else:
        mode = {'mode': mode}

    if not os.path.isdir(fn_):
        # proc_dir is not present, create it with mode settings
        # NOTE(review): isdir + makedirs is not atomic; a concurrent creator
        # could raise OSError here — assumed acceptable for a minion cachedir.
        os.makedirs(fn_, **mode)

    d_stat = os.stat(fn_)

    # if mode is not an empty dict then we have an explicit
    # dir mode. So lets check if mode needs to be changed.
    if mode:
        mode_part = S_IMODE(d_stat.st_mode)
        if mode_part != mode['mode']:
            # Replace only the permission bits: XOR strips the current
            # permission bits out of st_mode, then OR installs the new ones,
            # preserving the file-type bits of st_mode.
            os.chmod(fn_, (d_stat.st_mode ^ mode_part) | mode['mode'])

    if hasattr(os, 'chown'):
        # only on unix/unix like systems
        uid = kwargs.pop('uid', -1)
        gid = kwargs.pop('gid', -1)

        # if uid and gid are both -1 then go ahead with
        # no changes at all
        if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \
                [i for i in (uid, gid) if i != -1]:
            os.chown(fn_, uid, gid)

    return fn_


def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):
    '''
    Detect the args and kwargs that need to be passed to a function call, and
    check them against what was passed.
    '''
    argspec = salt.utils.args.get_function_argspec(func)
    _args = []
    _kwargs = {}
    invalid_kwargs = []

    for arg in args:
        if isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
            # if the arg is a dict with __kwarg__ == True, then its a kwarg
            for key, val in six.iteritems(arg):
                if argspec.keywords or key in argspec.args:
                    # Function supports **kwargs or is a positional argument to
                    # the function.
                    _kwargs[key] = val
                else:
                    # **kwargs not in argspec and parsed argument name not in
                    # list of positional arguments. This keyword argument is
                    # invalid.
                    invalid_kwargs.append('{0}={1}'.format(key, val))
            continue

        else:
            string_kwarg = salt.utils.args.parse_input([arg], condition=False)[1]  # pylint: disable=W0632
            if string_kwarg:
                if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args:
                    # Function supports **kwargs or is a positional argument to
                    # the function.
                    _kwargs.update(string_kwarg)
                else:
                    # **kwargs not in argspec and parsed argument name not in
                    # list of positional arguments. This keyword argument is
                    # invalid.
                    for key, val in six.iteritems(string_kwarg):
                        invalid_kwargs.append('{0}={1}'.format(key, val))
            else:
                _args.append(arg)

    if invalid_kwargs and not ignore_invalid:
        salt.utils.args.invalid_kwargs(invalid_kwargs)

    if argspec.keywords and isinstance(data, dict):
        # this function accepts **kwargs, pack in the publish data
        for key, val in six.iteritems(data):
            _kwargs['__pub_{0}'.format(key)] = val

    return _args, _kwargs


def master_event(type, master=None):
    '''
    Centralized master event function which will return event type based on event_map

    For type='alive' with a master given, the master id is appended so each
    master connection gets its own distinct event tag. Unknown types return
    None.
    '''
    event_map = {'connected': '__master_connected',
                 'disconnected': '__master_disconnected',
                 'failback': '__master_failback',
                 'alive': '__master_alive'}
    if type == 'alive' and master is not None:
        return '{0}_{1}'.format(event_map.get(type), master)
    return event_map.get(type, None)


def service_name():
    '''
    Return the proper service name based on platform

    BSD platforms use an underscore ('salt_minion'); everything else uses
    the hyphenated 'salt-minion'.
    '''
    return 'salt_minion' if 'bsd' in sys.platform else 'salt-minion'


class MinionBase(object):
    def __init__(self, opts):
        self.opts = opts

    @staticmethod
    def process_schedule(minion, loop_interval):
        # Evaluate the minion's scheduler (if initialized) and return the
        # possibly-lowered loop interval required by scheduled jobs.
        try:
            if hasattr(minion, 'schedule'):
                minion.schedule.eval()
            else:
                log.error('Minion scheduler not initialized. Scheduled jobs will not be run.')
                return
            # Check if scheduler requires lower loop interval than
            # the loop_interval setting
            if minion.schedule.loop_interval < loop_interval:
                loop_interval = minion.schedule.loop_interval
                log.debug(
                    'Overriding loop_interval because of scheduled jobs.'
) except Exception as exc: log.error('Exception %s occurred in scheduled job', exc) return loop_interval def process_beacons(self, functions): ''' Evaluate all of the configured beacons, grab the config again in case the pillar or grains changed ''' if 'config.merge' in functions: b_conf = functions['config.merge']('beacons', self.opts['beacons'], omit_opts=True) if b_conf: return self.beacons.process(b_conf, self.opts['grains']) # pylint: disable=no-member return [] @tornado.gen.coroutine def eval_master(self, opts, timeout=60, safe=True, failed=False, failback=False): ''' Evaluates and returns a tuple of the current master address and the pub_channel. In standard mode, just creates a pub_channel with the given master address. With master_type=func evaluates the current master address from the given module and then creates a pub_channel. With master_type=failover takes the list of masters and loops through them. The first one that allows the minion to create a pub_channel is then returned. If this function is called outside the minions initialization phase (for example from the minions main event-loop when a master connection loss was detected), 'failed' should be set to True. The current (possibly failed) master will then be removed from the list of masters. ''' # return early if we are not connecting to a master if opts['master_type'] == 'disable': log.warning('Master is set to disable, skipping connection') self.connected = False raise tornado.gen.Return((None, None)) # Run masters discovery over SSDP. This may modify the whole configuration, # depending of the networking and sets of masters. # if we are using multimaster, discovery can only happen at start time # because MinionManager handles it. 
by eval_master time the minion doesn't # know about other siblings currently running if isinstance(self.opts['discovery'], dict) and not self.opts['discovery'].get('multimaster'): self._discover_masters() # check if master_type was altered from its default if opts['master_type'] != 'str' and opts['__role'] != 'syndic': # check for a valid keyword if opts['master_type'] == 'func': eval_master_func(opts) # if failover or distributed is set, master has to be of type list elif opts['master_type'] in ('failover', 'distributed'): if isinstance(opts['master'], list): log.info( 'Got list of available master addresses: %s', opts['master'] ) if opts['master_type'] == 'distributed': master_len = len(opts['master']) if master_len > 1: secondary_masters = opts['master'][1:] master_idx = crc32(opts['id']) % master_len try: preferred_masters = opts['master'] preferred_masters[0] = opts['master'][master_idx] preferred_masters[1:] = [m for m in opts['master'] if m != preferred_masters[0]] opts['master'] = preferred_masters log.info('Distributed to the master at \'%s\'.', opts['master'][0]) except (KeyError, AttributeError, TypeError): log.warning('Failed to distribute to a specific master.') else: log.warning('master_type = distributed needs more than 1 master.') if opts['master_shuffle']: log.warning( 'Use of \'master_shuffle\' detected. \'master_shuffle\' is deprecated in favor ' 'of \'random_master\'. Please update your minion config file.' ) opts['random_master'] = opts['master_shuffle'] opts['auth_tries'] = 0 if opts['master_failback'] and opts['master_failback_interval'] == 0: opts['master_failback_interval'] = opts['master_alive_interval'] # if opts['master'] is a str and we have never created opts['master_list'] elif isinstance(opts['master'], six.string_types) and ('master_list' not in opts): # We have a string, but a list was what was intended. Convert. 
# See issue 23611 for details opts['master'] = [opts['master']] elif opts['__role'] == 'syndic': log.info('Syndic setting master_syndic to \'%s\'', opts['master']) # if failed=True, the minion was previously connected # we're probably called from the minions main-event-loop # because a master connection loss was detected. remove # the possibly failed master from the list of masters. elif failed: if failback: # failback list of masters to original config opts['master'] = opts['master_list'] else: log.info( 'Moving possibly failed master %s to the end of ' 'the list of masters', opts['master'] ) if opts['master'] in opts['local_masters']: # create new list of master with the possibly failed # one moved to the end failed_master = opts['master'] opts['master'] = [x for x in opts['local_masters'] if opts['master'] != x] opts['master'].append(failed_master) else: opts['master'] = opts['master_list'] else: msg = ('master_type set to \'failover\' but \'master\' ' 'is not of type list but of type ' '{0}'.format(type(opts['master']))) log.error(msg) sys.exit(salt.defaults.exitcodes.EX_GENERIC) # If failover is set, minion have to failover on DNS errors instead of retry DNS resolve. # See issue 21082 for details if opts['retry_dns'] and opts['master_type'] == 'failover': msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. 
' 'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.') log.critical(msg) opts['retry_dns'] = 0 else: msg = ('Invalid keyword \'{0}\' for variable ' '\'master_type\''.format(opts['master_type'])) log.error(msg) sys.exit(salt.defaults.exitcodes.EX_GENERIC) # FIXME: if SMinion don't define io_loop, it can't switch master see #29088 # Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop # (The channel factories will set a default if the kwarg isn't passed) factory_kwargs = {'timeout': timeout, 'safe': safe} if getattr(self, 'io_loop', None): factory_kwargs['io_loop'] = self.io_loop # pylint: disable=no-member tries = opts.get('master_tries', 1) attempts = 0 # if we have a list of masters, loop through them and be # happy with the first one that allows us to connect if isinstance(opts['master'], list): conn = False last_exc = None opts['master_uri_list'] = [] opts['local_masters'] = copy.copy(opts['master']) # shuffle the masters and then loop through them if opts['random_master']: # master_failback is only used when master_type is set to failover if opts['master_type'] == 'failover' and opts['master_failback']: secondary_masters = opts['local_masters'][1:] shuffle(secondary_masters) opts['local_masters'][1:] = secondary_masters else: shuffle(opts['local_masters']) # This sits outside of the connection loop below because it needs to set # up a list of master URIs regardless of which masters are available # to connect _to_. This is primarily used for masterless mode, when # we need a list of master URIs to fire calls back to. for master in opts['local_masters']: opts['master'] = master opts.update(prep_ip_port(opts)) opts['master_uri_list'].append(resolve_dns(opts)['master_uri']) while True: if attempts != 0: # Give up a little time between connection attempts # to allow the IOLoop to run any other scheduled tasks. 
yield tornado.gen.sleep(opts['acceptance_wait_time']) attempts += 1 if tries > 0: log.debug( 'Connecting to master. Attempt %s of %s', attempts, tries ) else: log.debug( 'Connecting to master. Attempt %s (infinite attempts)', attempts ) for master in opts['local_masters']: opts['master'] = master opts.update(prep_ip_port(opts)) opts.update(resolve_dns(opts)) # on first run, update self.opts with the whole master list # to enable a minion to re-use old masters if they get fixed if 'master_list' not in opts: opts['master_list'] = copy.copy(opts['local_masters']) self.opts = opts pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs) try: yield pub_channel.connect() conn = True break except SaltClientError as exc: last_exc = exc if exc.strerror.startswith('Could not access'): msg = ( 'Failed to initiate connection with Master ' '%s: check ownership/permissions. Error ' 'message: %s', opts['master'], exc ) else: msg = ('Master %s could not be reached, trying next ' 'next master (if any)', opts['master']) log.info(msg) continue if not conn: if attempts == tries: # Exhausted all attempts. Return exception. self.connected = False self.opts['master'] = copy.copy(self.opts['local_masters']) log.error( 'No master could be reached or all masters ' 'denied the minion\'s connection attempt.' ) # If the code reaches this point, 'last_exc' # should already be set. raise last_exc # pylint: disable=E0702 else: self.tok = pub_channel.auth.gen_token(b'salt') self.connected = True raise tornado.gen.Return((opts['master'], pub_channel)) # single master sign in else: if opts['random_master']: log.warning('random_master is True but there is only one master specified. Ignoring.') while True: if attempts != 0: # Give up a little time between connection attempts # to allow the IOLoop to run any other scheduled tasks. yield tornado.gen.sleep(opts['acceptance_wait_time']) attempts += 1 if tries > 0: log.debug( 'Connecting to master. 
Attempt %s of %s', attempts, tries ) else: log.debug( 'Connecting to master. Attempt %s (infinite attempts)', attempts ) opts.update(prep_ip_port(opts)) opts.update(resolve_dns(opts)) try: if self.opts['transport'] == 'detect': self.opts['detect_mode'] = True for trans in ('zeromq', 'tcp'): if trans == 'zeromq' and not zmq: continue self.opts['transport'] = trans pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs) yield pub_channel.connect() if not pub_channel.auth.authenticated: continue del self.opts['detect_mode'] break else: pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs) yield pub_channel.connect() self.tok = pub_channel.auth.gen_token(b'salt') self.connected = True raise tornado.gen.Return((opts['master'], pub_channel)) except SaltClientError as exc: if attempts == tries: # Exhausted all attempts. Return exception. self.connected = False raise exc def _discover_masters(self): ''' Discover master(s) and decide where to connect, if SSDP is around. This modifies the configuration on the fly. :return: ''' if self.opts['master'] == DEFAULT_MINION_OPTS['master'] and self.opts['discovery'] is not False: master_discovery_client = salt.utils.ssdp.SSDPDiscoveryClient() masters = {} for att in range(self.opts['discovery'].get('attempts', 3)): try: att += 1 log.info('Attempting %s time(s) to discover masters', att) masters.update(master_discovery_client.discover()) if not masters: time.sleep(self.opts['discovery'].get('pause', 5)) else: break except Exception as err: log.error('SSDP discovery failure: %s', err) break if masters: policy = self.opts.get('discovery', {}).get('match', 'any') if policy not in ['any', 'all']: log.error('SSDP configuration matcher failure: unknown value "%s". 
' 'Should be "any" or "all"', policy) return mapping = self.opts['discovery'].get('mapping', {}) discovered = [] for addr, mappings in masters.items(): for proto_data in mappings: cnt = len([key for key, value in mapping.items() if proto_data.get('mapping', {}).get(key) == value]) if policy == 'any' and bool(cnt) or cnt == len(mapping): if self.opts['discovery'].get('multimaster'): discovered.append(proto_data['master']) else: self.opts['master'] = proto_data['master'] return self.opts['master'] = discovered def _return_retry_timer(self): ''' Based on the minion configuration, either return a randomized timer or just return the value of the return_retry_timer. ''' msg = 'Minion return retry timer set to %s seconds' if self.opts.get('return_retry_timer_max'): try: random_retry = randint(self.opts['return_retry_timer'], self.opts['return_retry_timer_max']) retry_msg = msg % random_retry log.debug('%s (randomized)', msg % random_retry) return random_retry except ValueError: # Catch wiseguys using negative integers here log.error( 'Invalid value (return_retry_timer: %s or ' 'return_retry_timer_max: %s). Both must be positive ' 'integers.', self.opts['return_retry_timer'], self.opts['return_retry_timer_max'], ) log.debug(msg, DEFAULT_MINION_OPTS['return_retry_timer']) return DEFAULT_MINION_OPTS['return_retry_timer'] else: log.debug(msg, self.opts.get('return_retry_timer')) return self.opts.get('return_retry_timer') class SMinion(MinionBase): ''' Create an object that has loaded all of the minion module functions, grains, modules, returners etc. The SMinion allows developers to generate all of the salt minion functions and present them with these functions for general use. 
''' def __init__(self, opts): # Late setup of the opts grains, so we can log from the grains module import salt.loader opts['grains'] = salt.loader.grains(opts) super(SMinion, self).__init__(opts) # run ssdp discovery if necessary self._discover_masters() # Clean out the proc directory (default /var/cache/salt/minion/proc) if (self.opts.get('file_client', 'remote') == 'remote' or self.opts.get('use_master_when_local', False)): install_zmq() io_loop = ZMQDefaultLoop.current() io_loop.run_sync( lambda: self.eval_master(self.opts, failed=True) ) self.gen_modules(initial_load=True) # If configured, cache pillar data on the minion if self.opts['file_client'] == 'remote' and self.opts.get('minion_pillar_cache', False): import salt.utils.yaml pdir = os.path.join(self.opts['cachedir'], 'pillar') if not os.path.isdir(pdir): os.makedirs(pdir, 0o700) ptop = os.path.join(pdir, 'top.sls') if self.opts['saltenv'] is not None: penv = self.opts['saltenv'] else: penv = 'base' cache_top = {penv: {self.opts['id']: ['cache']}} with salt.utils.files.fopen(ptop, 'wb') as fp_: salt.utils.yaml.safe_dump(cache_top, fp_) os.chmod(ptop, 0o600) cache_sls = os.path.join(pdir, 'cache.sls') with salt.utils.files.fopen(cache_sls, 'wb') as fp_: salt.utils.yaml.safe_dump(self.opts['pillar'], fp_) os.chmod(cache_sls, 0o600) def gen_modules(self, initial_load=False): ''' Tell the minion to reload the execution modules CLI Example: .. 
code-block:: bash salt '*' sys.reload_modules ''' self.opts['pillar'] = salt.pillar.get_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ).compile_pillar() self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils) self.serializers = salt.loader.serializers(self.opts) self.returners = salt.loader.returners(self.opts, self.functions) self.proxy = salt.loader.proxy(self.opts, self.functions, self.returners, None) # TODO: remove self.function_errors = {} # Keep the funcs clean self.states = salt.loader.states(self.opts, self.functions, self.utils, self.serializers) self.rend = salt.loader.render(self.opts, self.functions) # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.functions['sys.reload_modules'] = self.gen_modules self.executors = salt.loader.executors(self.opts) class MasterMinion(object): ''' Create a fully loaded minion function object for generic use on the master. What makes this class different is that the pillar is omitted, otherwise everything else is loaded cleanly. ''' def __init__( self, opts, returners=True, states=True, rend=True, matcher=True, whitelist=None, ignore_config_errors=True): self.opts = salt.config.minion_config( opts['conf_file'], ignore_config_errors=ignore_config_errors, role='master' ) self.opts.update(opts) self.whitelist = whitelist self.opts['grains'] = salt.loader.grains(opts) self.opts['pillar'] = {} self.mk_returners = returners self.mk_states = states self.mk_rend = rend self.mk_matcher = matcher self.gen_modules(initial_load=True) def gen_modules(self, initial_load=False): ''' Tell the minion to reload the execution modules CLI Example: .. 
code-block:: bash salt '*' sys.reload_modules ''' self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods( self.opts, utils=self.utils, whitelist=self.whitelist, initial_load=initial_load) self.serializers = salt.loader.serializers(self.opts) if self.mk_returners: self.returners = salt.loader.returners(self.opts, self.functions) if self.mk_states: self.states = salt.loader.states(self.opts, self.functions, self.utils, self.serializers) if self.mk_rend: self.rend = salt.loader.render(self.opts, self.functions) if self.mk_matcher: self.matchers = salt.loader.matchers(self.opts) self.functions['sys.reload_modules'] = self.gen_modules class MinionManager(MinionBase): ''' Create a multi minion interface, this creates as many minions as are defined in the master option and binds each minion object to a respective master. ''' def __init__(self, opts): super(MinionManager, self).__init__(opts) self.auth_wait = self.opts['acceptance_wait_time'] self.max_auth_wait = self.opts['acceptance_wait_time_max'] self.minions = [] self.jid_queue = [] install_zmq() self.io_loop = ZMQDefaultLoop.current() self.process_manager = ProcessManager(name='MultiMinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # Tornado backward compat def __del__(self): self.destroy() def _bind(self): # start up the event publisher, so we can see events during startup self.event_publisher = salt.utils.event.AsyncEventPublisher( self.opts, io_loop=self.io_loop, ) self.event = salt.utils.event.get_event('minion', opts=self.opts, io_loop=self.io_loop) self.event.subscribe('') self.event.set_event_handler(self.handle_event) @tornado.gen.coroutine def handle_event(self, package): yield [minion.handle_event(package) for minion in self.minions] def _create_minion_object(self, opts, timeout, safe, io_loop=None, loaded_base_name=None, jid_queue=None): ''' Helper function to return the correct type of object ''' return Minion(opts, timeout, 
safe, io_loop=io_loop, loaded_base_name=loaded_base_name, jid_queue=jid_queue) def _check_minions(self): ''' Check the size of self.minions and raise an error if it's empty ''' if not self.minions: err = ('Minion unable to successfully connect to ' 'a Salt Master.') log.error(err) def _spawn_minions(self, timeout=60): ''' Spawn all the coroutines which will sign in to masters ''' # Run masters discovery over SSDP. This may modify the whole configuration, # depending of the networking and sets of masters. If match is 'any' we let # eval_master handle the discovery instead so disconnections can also handle # discovery if isinstance(self.opts['discovery'], dict) and self.opts['discovery'].get('multimaster'): self._discover_masters() masters = self.opts['master'] if (self.opts['master_type'] in ('failover', 'distributed')) or not isinstance(self.opts['master'], list): masters = [masters] for master in masters: s_opts = copy.deepcopy(self.opts) s_opts['master'] = master s_opts['multimaster'] = True minion = self._create_minion_object(s_opts, s_opts['auth_timeout'], False, io_loop=self.io_loop, loaded_base_name='salt.loader.{0}'.format(s_opts['master']), jid_queue=self.jid_queue) self.io_loop.spawn_callback(self._connect_minion, minion) self.io_loop.call_later(timeout, self._check_minions) @tornado.gen.coroutine def _connect_minion(self, minion): ''' Create a minion, and asynchronously connect it to a master ''' auth_wait = minion.opts['acceptance_wait_time'] failed = False while True: if failed: if auth_wait < self.max_auth_wait: auth_wait += self.auth_wait log.debug( "sleeping before reconnect attempt to %s [%d/%d]", minion.opts['master'], auth_wait, self.max_auth_wait, ) yield tornado.gen.sleep(auth_wait) # TODO: log? 
try: if minion.opts.get('beacons_before_connect', False): minion.setup_beacons(before_connect=True) if minion.opts.get('scheduler_before_connect', False): minion.setup_scheduler(before_connect=True) yield minion.connect_master(failed=failed) minion.tune_in(start=False) self.minions.append(minion) break except SaltClientError as exc: failed = True log.error( 'Error while bringing up minion for multi-master. Is ' 'master at %s responding?', minion.opts['master'] ) except SaltMasterUnresolvableError: err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \ 'Set \'master\' value in minion config.'.format(minion.opts['master']) log.error(err) break except Exception as e: failed = True log.critical( 'Unexpected error while connecting to %s', minion.opts['master'], exc_info=True ) # Multi Master Tune In def tune_in(self): ''' Bind to the masters This loop will attempt to create connections to masters it hasn't connected to yet, but once the initial connection is made it is up to ZMQ to do the reconnect (don't know of an API to get the state here in salt) ''' self._bind() # Fire off all the minion coroutines self._spawn_minions() # serve forever! 
self.io_loop.start() @property def restart(self): for minion in self.minions: if minion.restart: return True return False def stop(self, signum): for minion in self.minions: minion.process_manager.stop_restarting() minion.process_manager.send_signal_to_processes(signum) # kill any remaining processes minion.process_manager.kill_children() minion.destroy() def destroy(self): for minion in self.minions: minion.destroy() class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. 
We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, 
timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. 
''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ 
master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = 
salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) executors = salt.loader.executors(opts, functions, proxy=proxy) if opt_in: self.opts = opts return functions, returners, errors, executors def _send_req_sync(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.ReqChannel.factory(self.opts) try: return channel.send(load, timeout=timeout) finally: channel.close() @tornado.gen.coroutine def _send_req_async(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.AsyncReqChannel.factory(self.opts) try: ret = yield channel.send(load, timeout=timeout) raise tornado.gen.Return(ret) finally: channel.close() def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. 
    @tornado.gen.coroutine
    def _handle_decoded_payload(self, data):
        '''
        Override this method if you wish to handle the decoded data
        differently.

        Processes a single decoded job payload: de-duplicates by jid,
        handles ``sys.reload_modules``, throttles on ``process_count_max``,
        then hands the job to ``self._target`` in a new process (or thread).
        '''
        # Ensure payload is unicode. Disregard failure to decode binary blobs.
        if six.PY2:
            data = salt.utils.data.decode(data, keep=True)
        if 'user' in data:
            log.info(
                'User %s Executing command %s with jid %s',
                data['user'], data['fun'], data['jid']
            )
        else:
            log.info(
                'Executing command %s with jid %s',
                data['fun'], data['jid']
            )
        log.debug('Command details %s', data)

        # Don't duplicate jobs
        log.trace('Started JIDs: %s', self.jid_queue)
        if self.jid_queue is not None:
            if data['jid'] in self.jid_queue:
                # Already seen this jid; drop the duplicate publish.
                return
            else:
                self.jid_queue.append(data['jid'])
                # Bound the dedupe queue at minion_jid_queue_hwm entries
                # by evicting the oldest jid.
                if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']:
                    self.jid_queue.pop(0)

        if isinstance(data['fun'], six.string_types):
            if data['fun'] == 'sys.reload_modules':
                # Reload execution modules/returners in-process and keep the
                # scheduler's references in sync with the new loader dicts.
                self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
                self.schedule.functions = self.functions
                self.schedule.returners = self.returners

        # NOTE(review): opts.get() returns None when these keys are unset;
        # the `> 0` comparison would raise TypeError on py3 in that case —
        # presumably config defaults guarantee integers. TODO confirm.
        process_count_max = self.opts.get('process_count_max')
        process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs')
        if process_count_max > 0:
            # Throttle: wait until the number of running jobs drops below
            # the configured ceiling before starting this one.
            process_count = len(salt.utils.minion.running(self.opts))
            while process_count >= process_count_max:
                log.warning('Maximum number of processes (%s) reached while '
                            'executing jid %s, waiting %s seconds...',
                            process_count_max,
                            data['jid'],
                            process_count_max_sleep_secs)
                yield tornado.gen.sleep(process_count_max_sleep_secs)
                process_count = len(salt.utils.minion.running(self.opts))

        # We stash an instance references to allow for the socket
        # communication in Windows. You can't pickle functions, and thus
        # python needs to be able to reconstruct the reference on the other
        # side.
        instance = self
        multiprocessing_enabled = self.opts.get('multiprocessing', True)
        if multiprocessing_enabled:
            if sys.platform.startswith('win'):
                # let python reconstruct the minion on the other side if we're
                # running on windows
                instance = None
            with default_signals(signal.SIGINT, signal.SIGTERM):
                process = SignalHandlingMultiprocessingProcess(
                    target=self._target, args=(instance, self.opts, data, self.connected)
                )
        else:
            process = threading.Thread(
                target=self._target,
                args=(instance, self.opts, data, self.connected),
                name=data['jid']
            )

        if multiprocessing_enabled:
            with default_signals(signal.SIGINT, signal.SIGTERM):
                # Reset current signals before starting the process in
                # order not to inherit the current signal handlers
                process.start()
        else:
            process.start()

        # TODO: remove the windows specific check?
        if multiprocessing_enabled and not salt.utils.platform.is_windows():
            # we only want to join() immediately if we are daemonizing a process
            process.join()
        elif salt.utils.platform.is_windows():
            # Keep a handle to the child so it can be tracked/cleaned later.
            self.win_proc.append(process)
hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or 
allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
try: func_result = all(return_data.get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret['retcode'] = retcode ret['success'] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = 'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( 'A command in \'%s\' had a problem: %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( 'Problem executing \'%s\': %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing \'{0}\': {1}'.format( function_name, exc ) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format( function_name, exc, func.__doc__ or '' ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string(function_name) ret['return'] = docs else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = 
function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: \'{0}\''.format( minion_instance.function_errors[mod_name] ) ret['success'] = False ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. Ignoring.') if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) # Add default returners from minion config # Should have been coverted to comma-delimited string already if isinstance(opts.get('return'), six.string_types): if data['ret']: data['ret'] = ','.join((data['ret'], opts['return'])) else: data['ret'] = opts['return'] log.debug('minion return: %s', ret) # TODO: make a list? Seems odd to split it this late :/ if data['ret'] and isinstance(data['ret'], six.string_types): if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: returner_str = '{0}.returner'.format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string(returner_str) log.error( 'Returner %s could not be loaded: %s', returner_str, returner_err ) except Exception as exc: log.exception( 'The return failed for job %s: %s', data['jid'], exc ) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. 
''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID %s', sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) multifunc_ordered = opts.get('multifunc_ordered', False) num_funcs = len(data['fun']) if multifunc_ordered: ret = { 'return': [None] * num_funcs, 'retcode': [None] * num_funcs, 'success': [False] * num_funcs } else: ret = { 'return': {}, 'retcode': {}, 'success': {} } for ind in range(0, num_funcs): if not multifunc_ordered: ret['success'][data['fun'][ind]] = False try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True elif minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. 
Only ' 'saltutil.refresh_pillar allowed in blackout mode.') func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) minion_instance.functions.pack['__context__']['retcode'] = 0 key = ind if multifunc_ordered else data['fun'][ind] ret['return'][key] = func(*args, **kwargs) retcode = minion_instance.functions.pack['__context__'].get( 'retcode', 0 ) if retcode == 0: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. try: func_result = all(ret['return'][key].get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = 1 ret['retcode'][key] = retcode ret['success'][key] = retcode == 0 except Exception as exc: trb = traceback.format_exc() log.warning('The minion function caused an exception: %s', exc) if multifunc_ordered: ret['return'][ind] = trb else: ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job %s: %s', data['jid'], exc ) def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file 
is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled if ret['jid'] == 'req': ret['jid'] = salt.utils.jid.gen_jid(self.opts) salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) if not self.opts['pub_ret']: return '' def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. 
If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' if not isinstance(rets, list): rets = [rets] jids = {} for ret in rets: jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) load = jids.setdefault(jid, {}) if ret_cmd == '_syndic_return': if not load: load.update({'id': self.opts['id'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__'), 'return': {}}) if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load.update({'id': self.opts['id']}) for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. 
    def _state_run(self):
        '''
        Execute a state run based on information set in the minion config file

        Builds a synthetic job payload (jid 'req') for state.sls, state.top
        or state.highstate according to the ``startup_states`` option and
        hands it to ``_handle_decoded_payload``.
        '''
        if self.opts['startup_states']:
            if self.opts.get('master_type', 'str') == 'disable' and \
                    self.opts.get('file_client', 'remote') == 'remote':
                # Masterless-disabled + remote file client cannot serve state
                # files, so a startup state run would always fail.
                log.warning(
                    'Cannot run startup_states when \'master_type\' is set '
                    'to \'disable\' and \'file_client\' is set to '
                    '\'remote\'. Skipping.'
                )
            else:
                data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
                if self.opts['startup_states'] == 'sls':
                    data['fun'] = 'state.sls'
                    data['arg'] = [self.opts['sls_list']]
                elif self.opts['startup_states'] == 'top':
                    data['fun'] = 'state.top'
                    data['arg'] = [self.opts['top_file']]
                else:
                    # Any other truthy value means a full highstate.
                    data['fun'] = 'state.highstate'
                    data['arg'] = []
                # _handle_decoded_payload is a tornado coroutine; calling it
                # un-yielded starts the job but does not await completion.
                self._handle_decoded_payload(data)
    # TODO: only allow one future in flight at a time?
    @tornado.gen.coroutine
    def pillar_refresh(self, force_refresh=False, notify=False):
        '''
        Refresh the pillar

        Re-compiles pillar data from the master (when connected), then
        refreshes modules, matchers and beacons so they see the new pillar.

        :param bool force_refresh: passed through to module_refresh
        :param bool notify: fire a MINION_PILLAR_COMPLETE event when the
            pillar compile finishes
        '''
        if self.connected:
            log.debug('Refreshing pillar. Notify: %s', notify)
            async_pillar = salt.pillar.get_async_pillar(
                self.opts,
                self.opts['grains'],
                self.opts['id'],
                self.opts['saltenv'],
                pillarenv=self.opts.get('pillarenv'),
            )
            try:
                self.opts['pillar'] = yield async_pillar.compile_pillar()
                if notify:
                    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
                    evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE)
            except SaltClientError:
                # Do not exit if a pillar refresh fails.
                log.error('Pillar data could not be refreshed. '
                          'One or more masters may be down!')
            finally:
                async_pillar.destroy()
        # Always refresh dependents, even when disconnected or after a
        # failed pillar compile.
        self.module_refresh(force_refresh, notify)
        self.matchers_refresh()
        self.beacons_refresh()
''' func = data.get('func', None) name = data.get('name', None) schedule = data.get('schedule', None) where = data.get('where', None) persist = data.get('persist', None) funcs = {'delete': ('delete_job', (name, persist)), 'add': ('add_job', (schedule, persist)), 'modify': ('modify_job', (name, schedule, persist)), 'enable': ('enable_schedule', ()), 'disable': ('disable_schedule', ()), 'enable_job': ('enable_job', (name, persist)), 'disable_job': ('disable_job', (name, persist)), 'postpone_job': ('postpone_job', (name, data)), 'skip_job': ('skip_job', (name, data)), 'reload': ('reload', (schedule,)), 'list': ('list', (where,)), 'save_schedule': ('save_schedule', ()), 'get_next_fire_time': ('get_next_fire_time', (name,))} # Call the appropriate schedule function try: alias, params = funcs.get(func) getattr(self.schedule, alias)(*params) except TypeError: log.error('Function "%s" is unavailable in salt.utils.scheduler', func) def manage_beacons(self, tag, data): ''' Manage Beacons ''' func = data.get('func', None) name = data.get('name', None) beacon_data = data.get('beacon_data', None) include_pillar = data.get('include_pillar', None) include_opts = data.get('include_opts', None) funcs = {'add': ('add_beacon', (name, beacon_data)), 'modify': ('modify_beacon', (name, beacon_data)), 'delete': ('delete_beacon', (name,)), 'enable': ('enable_beacons', ()), 'disable': ('disable_beacons', ()), 'enable_beacon': ('enable_beacon', (name,)), 'disable_beacon': ('disable_beacon', (name,)), 'list': ('list_beacons', (include_opts, include_pillar)), 'list_available': ('list_available_beacons', ()), 'validate_beacon': ('validate_beacon', (name, beacon_data)), 'reset': ('reset', ())} # Call the appropriate beacon function try: alias, params = funcs.get(func) getattr(self.beacons, alias)(*params) except AttributeError: log.error('Function "%s" is unavailable in salt.beacons', func) except TypeError as exc: log.info( 'Failed to handle %s with data(%s). 
Error: %s', tag, data, exc,
                exc_info_on_loglevel=logging.DEBUG
            )

    def environ_setenv(self, tag, data):
        '''
        Set the salt-minion main process environment according to the data
        contained in the minion event data
        '''
        environ = data.get('environ', None)
        if environ is None:
            # Nothing to apply; signal failure to the caller.
            return False
        false_unsets = data.get('false_unsets', False)
        clear_all = data.get('clear_all', False)
        # Imported lazily so the module dependency is only paid when an
        # environ_setenv event actually arrives.
        import salt.modules.environ as mod_environ
        return mod_environ.setenv(environ, false_unsets, clear_all)

    def _pre_tune(self):
        '''
        Set the minion running flag and issue the appropriate warnings if
        the minion cannot be started or is already running
        '''
        if self._running is None:
            # First tune_in: mark the minion as running.
            self._running = True
        elif self._running is False:
            log.error(
                'This %s was scheduled to stop. Not running %s.tune_in()',
                self.__class__.__name__, self.__class__.__name__
            )
            return
        elif self._running is True:
            log.error(
                'This %s is already running. Not running %s.tune_in()',
                self.__class__.__name__, self.__class__.__name__
            )
            return

        try:
            log.info(
                '%s is starting as user \'%s\'',
                self.__class__.__name__, salt.utils.user.get_user()
            )
        except Exception as err:
            # Only windows is allowed to fail here. See #3189. Log as debug in
            # that case. Else, error.
log.log(
                salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR,
                'Failed to get the user who is starting %s',
                self.__class__.__name__,
                exc_info=err
            )

    def _mine_send(self, tag, data):
        '''
        Send mine data to the master
        '''
        channel = salt.transport.client.ReqChannel.factory(self.opts)
        # Attach the auth token so the master accepts the payload.
        data['tok'] = self.tok
        try:
            ret = channel.send(data)
            return ret
        except SaltReqTimeoutError:
            # Best-effort: a timeout is logged, not raised.
            log.warning('Unable to send mine data to master.')
            return None
        finally:
            channel.close()

    def _handle_tag_module_refresh(self, tag, data):
        '''
        Handle a module_refresh event
        '''
        self.module_refresh(
            force_refresh=data.get('force_refresh', False),
            notify=data.get('notify', False)
        )

    @tornado.gen.coroutine
    def _handle_tag_pillar_refresh(self, tag, data):
        '''
        Handle a pillar_refresh event
        '''
        yield self.pillar_refresh(
            force_refresh=data.get('force_refresh', False),
            notify=data.get('notify', False)
        )

    def _handle_tag_beacons_refresh(self, tag, data):
        '''
        Handle a beacon_refresh event
        '''
        self.beacons_refresh()

    def _handle_tag_matchers_refresh(self, tag, data):
        '''
        Handle a matchers_refresh event
        '''
        self.matchers_refresh()

    def _handle_tag_manage_schedule(self, tag, data):
        '''
        Handle a manage_schedule event
        '''
        self.manage_schedule(tag, data)

    def _handle_tag_manage_beacons(self, tag, data):
        '''
        Handle a manage_beacons event
        '''
        self.manage_beacons(tag, data)

    def _handle_tag_grains_refresh(self, tag, data):
        '''
        Handle a grains_refresh event
        '''
        if (data.get('force_refresh', False) or
                self.grains_cache != self.opts['grains']):
            # NOTE(review): pillar_refresh is a coroutine; the future returned
            # here is not yielded, so completion is not awaited — confirm this
            # fire-and-forget behavior is intended.
            self.pillar_refresh(force_refresh=True)
            self.grains_cache = self.opts['grains']

    def _handle_tag_environ_setenv(self, tag, data):
        '''
        Handle a environ_setenv event
        '''
        self.environ_setenv(tag, data)

    def _handle_tag_minion_mine(self, tag, data):
        '''
        Handle a _minion_mine event
        '''
        self._mine_send(tag, data)

    def _handle_tag_fire_master(self, tag, data):
        '''
        Handle a fire_master event
        '''
        if self.connected:
            log.debug('Forwarding master event tag=%s', data['tag'])
self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) def _handle_tag_master_disconnected_failback(self, tag, data): ''' Handle a master_disconnected_failback event ''' # if the master disconnect event is for a different master, raise an exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 
'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = 
True self.io_loop.stop() def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) def _handle_tag_salt_auth_creds(self, tag, data): ''' Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def handle_event(self, package): 
''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. ''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy() class Syndic(Minion): ''' Make a Syndic minion, this minion will use the minion keys on the master to authenticate with a higher level master. ''' def __init__(self, opts, **kwargs): self._syndic_interface = opts.get('interface') self._syndic = True # force auth_safemode True because Syndic don't support autorestart opts['auth_safemode'] = True opts['loop_interval'] = 1 super(Syndic, self).__init__(opts, **kwargs) self.mminion = salt.minion.MasterMinion(opts) self.jid_forward_cache = set() self.jids = {} self.raw_events = [] self.pub_future = None def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # TODO: even do this?? 
data['to'] = int(data.get('to', self.opts['timeout'])) - 1
        # Only forward the command if it didn't originate from ourselves
        if data.get('master_id', 0) != self.opts.get('master_id', 1):
            self.syndic_cmd(data)

    def syndic_cmd(self, data):
        '''
        Take the now clear load and forward it on to the client cmd
        '''
        # Set up default tgt_type
        if 'tgt_type' not in data:
            data['tgt_type'] = 'glob'
        kwargs = {}

        # optionally add a few fields to the publish data
        for field in ('master_id',  # which master the job came from
                      'user',  # which user ran the job
                      ):
            if field in data:
                kwargs[field] = data[field]

        def timeout_handler(*args):
            # Swallow the publish timeout; forwarding is best-effort.
            log.warning('Unable to forward pub data: %s', args[1])
            return True

        with tornado.stack_context.ExceptionStackContext(timeout_handler):
            self.local.pub_async(data['tgt'],
                                 data['fun'],
                                 data['arg'],
                                 data['tgt_type'],
                                 data['ret'],
                                 data['jid'],
                                 data['to'],
                                 io_loop=self.io_loop,
                                 callback=lambda _: None,
                                 **kwargs)

    def fire_master_syndic_start(self):
        '''
        Fire the syndic start event(s) up to the master.
        '''
        # Send an event to the master that the minion is live
        if self.opts['enable_legacy_startup_events']:
            # Old style event. Defaults to false in Sodium release.
            self._fire_master(
                'Syndic {0} started at {1}'.format(
                    self.opts['id'],
                    time.asctime()
                ),
                'syndic_start',
                sync=False,
            )
        self._fire_master(
            'Syndic {0} started at {1}'.format(
                self.opts['id'],
                time.asctime()
            ),
            tagify([self.opts['id'], 'start'], 'syndic'),
            sync=False,
        )

    # TODO: clean up docs
    def tune_in_no_block(self):
        '''
        Executes the tune_in sequence but omits extra logging and the
        management of the event bus assuming that these are handled outside
        the tune_in sequence
        '''
        # Instantiate the local client
        self.local = salt.client.get_local_client(
            self.opts['_minion_conf_file'], io_loop=self.io_loop)

        # add handler to subscriber
        self.pub_channel.on_recv(self._process_cmd_socket)

    def _process_cmd_socket(self, payload):
        '''
        Forward an AES-verified publish payload to the decoded-payload handler.
        '''
        if payload is not None and payload['enc'] == 'aes':
            log.trace('Handling payload')
            self._handle_decoded_payload(payload['load'])
        # If it's not AES, and thus has not been verified, we do nothing.
        # In the future, we could add support for some clearfuncs, but
        # the syndic currently has no need.

    @tornado.gen.coroutine
    def reconnect(self):
        # Drop the current pub channel (if any) before re-evaluating masters.
        if hasattr(self, 'pub_channel'):
            self.pub_channel.on_recv(None)
            if hasattr(self.pub_channel, 'close'):
                self.pub_channel.close()
            del self.pub_channel

        # if eval_master finds a new master for us, self.connected
        # will be True again on successful master authentication
        master, self.pub_channel = yield self.eval_master(opts=self.opts)

        if self.connected:
            self.opts['master'] = master
            self.pub_channel.on_recv(self._process_cmd_socket)
            log.info('Minion is ready to receive requests!')

        raise tornado.gen.Return(self)

    def destroy(self):
        '''
        Tear down the syndic minion
        '''
        # We borrowed the local clients poller so give it back before
        # it's destroyed. Reset the local poller reference.
super(Syndic, self).destroy() if hasattr(self, 'local'): del self.local if hasattr(self, 'forward_events'): self.forward_events.stop() # TODO: need a way of knowing if the syndic connection is busted class SyndicManager(MinionBase): ''' Make a MultiMaster syndic minion, this minion will handle relaying jobs and returns from all minions connected to it to the list of masters it is connected to. Modes (controlled by `syndic_mode`: sync: This mode will synchronize all events and publishes from higher level masters cluster: This mode will only sync job publishes and returns Note: jobs will be returned best-effort to the requesting master. This also means (since we are using zmq) that if a job was fired and the master disconnects between the publish and return, that the return will end up in a zmq buffer in this Syndic headed to that original master. In addition, since these classes all seem to use a mix of blocking and non-blocking calls (with varying timeouts along the way) this daemon does not handle failure well, it will (under most circumstances) stall the daemon for ~15s trying to forward events to the down master ''' # time to connect to upstream master SYNDIC_CONNECT_TIMEOUT = 5 SYNDIC_EVENT_TIMEOUT = 5 def __init__(self, opts, io_loop=None): opts['loop_interval'] = 1 super(SyndicManager, self).__init__(opts) self.mminion = salt.minion.MasterMinion(opts) # sync (old behavior), cluster (only returns and publishes) self.syndic_mode = self.opts.get('syndic_mode', 'sync') self.syndic_failover = self.opts.get('syndic_failover', 'random') self.auth_wait = self.opts['acceptance_wait_time'] self.max_auth_wait = self.opts['acceptance_wait_time_max'] self._has_master = threading.Event() self.jid_forward_cache = set() if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # List of events self.raw_events = [] # Dict of rets: {master_id: {event_tag: job_ret, ...}, ...} self.job_rets = {} # List of delayed job_rets which was 
unable to send for some reason and will be resend to # any available master self.delayed = [] # Active pub futures: {master_id: (future, [job_ret, ...]), ...} self.pub_futures = {} def _spawn_syndics(self): ''' Spawn all the coroutines which will sign in the syndics ''' self._syndics = OrderedDict() # mapping of opts['master'] -> syndic masters = self.opts['master'] if not isinstance(masters, list): masters = [masters] for master in masters: s_opts = copy.copy(self.opts) s_opts['master'] = master self._syndics[master] = self._connect_syndic(s_opts) @tornado.gen.coroutine def _connect_syndic(self, opts): ''' Create a syndic, and asynchronously connect it to a master ''' auth_wait = opts['acceptance_wait_time'] failed = False while True: if failed: if auth_wait < self.max_auth_wait: auth_wait += self.auth_wait log.debug( "sleeping before reconnect attempt to %s [%d/%d]", opts['master'], auth_wait, self.max_auth_wait, ) yield tornado.gen.sleep(auth_wait) # TODO: log? log.debug( 'Syndic attempting to connect to %s', opts['master'] ) try: syndic = Syndic(opts, timeout=self.SYNDIC_CONNECT_TIMEOUT, safe=False, io_loop=self.io_loop, ) yield syndic.connect_master(failed=failed) # set up the syndic to handle publishes (specifically not event forwarding) syndic.tune_in_no_block() # Send an event to the master that the minion is live syndic.fire_master_syndic_start() log.info( 'Syndic successfully connected to %s', opts['master'] ) break except SaltClientError as exc: failed = True log.error( 'Error while bringing up syndic for multi-syndic. Is the ' 'master at %s responding?', opts['master'] ) except (KeyboardInterrupt, SystemExit): raise except Exception: failed = True log.critical( 'Unexpected error while connecting to %s', opts['master'], exc_info=True ) raise tornado.gen.Return(syndic) def _mark_master_dead(self, master): ''' Mark a master as dead. 
This will start the sign-in routine
        '''
        # if its connected, mark it dead
        if self._syndics[master].done():
            syndic = self._syndics[master].result()  # pylint: disable=no-member
            # Replace the resolved future with a fresh reconnect coroutine.
            self._syndics[master] = syndic.reconnect()
        else:
            # TODO: debug?
            log.info(
                'Attempting to mark %s as dead, although it is already '
                'marked dead', master
            )

    def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
        '''
        Wrapper to call a given func on a syndic, best effort to get the one you asked for
        '''
        if kwargs is None:
            kwargs = {}
        successful = False
        # Call for each master
        for master, syndic_future in self.iter_master_options(master_id):
            if not syndic_future.done() or syndic_future.exception():
                log.error(
                    'Unable to call %s on %s, that syndic is not connected',
                    func, master
                )
                continue

            try:
                getattr(syndic_future.result(), func)(*args, **kwargs)
                successful = True
            except SaltClientError:
                # Mark this master dead and fall through to the next option.
                log.error(
                    'Unable to call %s on %s, trying another...',
                    func, master
                )
                self._mark_master_dead(master)
        if not successful:
            log.critical('Unable to call %s on any masters!', func)

    def _return_pub_syndic(self, values, master_id=None):
        '''
        Wrapper to call the '_return_pub_multi' a syndic, best effort to get
        the one you asked for
        '''
        func = '_return_pub_multi'
        for master, syndic_future in self.iter_master_options(master_id):
            if not syndic_future.done() or syndic_future.exception():
                log.error(
                    'Unable to call %s on %s, that syndic is not connected',
                    func, master
                )
                continue

            future, data = self.pub_futures.get(master, (None, None))
            if future is not None:
                if not future.done():
                    if master == master_id:
                        # Targeted master previous send not done yet, call again later
                        return False
                    else:
                        # Fallback master is busy, try the next one
                        continue
                elif future.exception():
                    # Previous execution on this master returned an error
                    log.error(
                        'Unable to call %s on %s, trying another...',
                        func, master
                    )
                    self._mark_master_dead(master)
                    del self.pub_futures[master]
                    # Add not sent data to the delayed list and try the next master
self.delayed.extend(data) continue future = getattr(syndic_future.result(), func)(values, '_syndic_return', timeout=self._return_retry_timer(), sync=False) self.pub_futures[master] = (future, values) return True # Loop done and didn't exit: wasn't sent, try again later return False def iter_master_options(self, master_id=None): ''' Iterate (in order) over your options for master ''' masters = list(self._syndics.keys()) if self.opts['syndic_failover'] == 'random': shuffle(masters) if master_id not in self._syndics: master_id = masters.pop(0) else: masters.remove(master_id) while True: yield master_id, self._syndics[master_id] if not masters: break master_id = masters.pop(0) def _reset_event_aggregation(self): self.job_rets = {} self.raw_events = [] def reconnect_event_bus(self, something): future = self.local.event.set_event_handler(self._process_event) self.io_loop.add_future(future, self.reconnect_event_bus) # Syndic Tune In def tune_in(self): ''' Lock onto the publisher. This is the main event loop for the syndic ''' self._spawn_syndics() # Instantiate the local client self.local = salt.client.get_local_client( self.opts['_minion_conf_file'], io_loop=self.io_loop) self.local.event.subscribe('') log.debug('SyndicManager \'%s\' trying to tune in', self.opts['id']) # register the event sub to the poller self.job_rets = {} self.raw_events = [] self._reset_event_aggregation() future = self.local.event.set_event_handler(self._process_event) self.io_loop.add_future(future, self.reconnect_event_bus) # forward events every syndic_event_forward_timeout self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events, self.opts['syndic_event_forward_timeout'] * 1000, ) self.forward_events.start() # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() self.io_loop.start() def _process_event(self, raw): # TODO: cleanup: Move down into event class mtag, data = self.local.event.unpack(raw, self.local.event.serial) log.trace('Got event %s', mtag) # pylint: 
disable=no-member tag_parts = mtag.split('/') if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \ salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \ 'return' in data: if 'jid' not in data: # Not a job return return if self.syndic_mode == 'cluster' and data.get('master_id', 0) == self.opts.get('master_id', 1): log.debug('Return received with matching master_id, not forwarding') return master = data.get('master_id') jdict = self.job_rets.setdefault(master, {}).setdefault(mtag, {}) if not jdict: jdict['__fun__'] = data.get('fun') jdict['__jid__'] = data['jid'] jdict['__load__'] = {} fstr = '{0}.get_load'.format(self.opts['master_job_cache']) # Only need to forward each load once. Don't hit the disk # for every minion return! if data['jid'] not in self.jid_forward_cache: jdict['__load__'].update( self.mminion.returners[fstr](data['jid']) ) self.jid_forward_cache.add(data['jid']) if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']: # Pop the oldest jid from the cache tmp = sorted(list(self.jid_forward_cache)) tmp.pop(0) self.jid_forward_cache = set(tmp) if master is not None: # __'s to make sure it doesn't print out on the master cli jdict['__master_id__'] = master ret = {} for key in 'return', 'retcode', 'success': if key in data: ret[key] = data[key] jdict[data['id']] = ret else: # TODO: config to forward these? 
If so we'll have to keep track of who # has seen them # if we are the top level masters-- don't forward all the minion events if self.syndic_mode == 'sync': # Add generic event aggregation here if 'retcode' not in data: self.raw_events.append({'data': data, 'tag': mtag}) def _forward_events(self): log.trace('Forwarding events') # pylint: disable=no-member if self.raw_events: events = self.raw_events self.raw_events = [] self._call_syndic('_fire_master', kwargs={'events': events, 'pretag': tagify(self.opts['id'], base='syndic'), 'timeout': self._return_retry_timer(), 'sync': False, }, ) if self.delayed: res = self._return_pub_syndic(self.delayed) if res: self.delayed = [] for master in list(six.iterkeys(self.job_rets)): values = list(six.itervalues(self.job_rets[master])) res = self._return_pub_syndic(values, master_id=master) if res: del self.job_rets[master] class ProxyMinionManager(MinionManager): ''' Create the multi-minion interface but for proxy minions ''' def _create_minion_object(self, opts, timeout, safe, io_loop=None, loaded_base_name=None, jid_queue=None): ''' Helper function to return the correct type of object ''' return ProxyMinion(opts, timeout, safe, io_loop=io_loop, loaded_base_name=loaded_base_name, jid_queue=jid_queue) def _metaproxy_call(opts, fn_name): metaproxy = salt.loader.metaproxy(opts) try: metaproxy_name = opts['metaproxy'] except KeyError: metaproxy_name = 'proxy' log.trace( 'No metaproxy key found in opts for id %s. ' 'Defaulting to standard proxy minion.', opts['id'] ) metaproxy_fn = metaproxy_name + '.' + fn_name return metaproxy[metaproxy_fn] class ProxyMinion(Minion): ''' This class instantiates a 'proxy' minion--a minion that does not manipulate the host it runs on, but instead manipulates a device that cannot run a minion. ''' # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. 
(since they need to know which master they connected to) If this function is changed, please check Minion._post_master_init to see if those changes need to be propagated. ProxyMinions need a significantly different post master setup, which is why the differences are not factored out into separate helper functions. ''' mp_call = _metaproxy_call(self.opts, 'post_master_init') return mp_call(self, master) def _target_load(self, load): ''' Verify that the publication is valid and applies to this minion ''' mp_call = _metaproxy_call(self.opts, 'target_load') return mp_call(self, load) def _handle_payload(self, payload): mp_call = _metaproxy_call(self.opts, 'handle_payload') return mp_call(self, payload) @tornado.gen.coroutine def _handle_decoded_payload(self, data): mp_call = _metaproxy_call(self.opts, 'handle_decoded_payload') return mp_call(self, data) @classmethod def _target(cls, minion_instance, opts, data, connected): mp_call = _metaproxy_call(opts, 'target') return mp_call(cls, minion_instance, opts, data, connected) @classmethod def _thread_return(cls, minion_instance, opts, data): mp_call = _metaproxy_call(opts, 'thread_return') return mp_call(cls, minion_instance, opts, data) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): mp_call = _metaproxy_call(opts, 'thread_multi_return') return mp_call(cls, minion_instance, opts, data) class SProxyMinion(SMinion): ''' Create an object that has loaded all of the minion module functions, grains, modules, returners etc. The SProxyMinion allows developers to generate all of the salt minion functions and present them with these functions for general use. ''' def gen_modules(self, initial_load=False): ''' Tell the minion to reload the execution modules CLI Example: .. 
code-block:: bash salt '*' sys.reload_modules ''' self.opts['grains'] = salt.loader.grains(self.opts) self.opts['pillar'] = salt.pillar.get_pillar( self.opts, self.opts['grains'], self.opts['id'], saltenv=self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ).compile_pillar() if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts: errmsg = ( 'No "proxy" configuration key found in pillar or opts ' 'dictionaries for id {id}. Check your pillar/options ' 'configuration and contents. Salt-proxy aborted.' ).format(id=self.opts['id']) log.error(errmsg) self._running = False raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg) if 'proxy' not in self.opts: self.opts['proxy'] = self.opts['pillar']['proxy'] # Then load the proxy module self.proxy = salt.loader.proxy(self.opts) self.utils = salt.loader.utils(self.opts, proxy=self.proxy) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=False, proxy=self.proxy) self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy) self.matchers = salt.loader.matchers(self.opts) self.functions['sys.reload_modules'] = self.gen_modules self.executors = salt.loader.executors(self.opts, self.functions, proxy=self.proxy) fq_proxyname = self.opts['proxy']['proxytype'] # we can then sync any proxymodules down from the master # we do a sync_all here in case proxy code was installed by # SPM or was manually placed in /srv/salt/_modules etc. 
self.functions['saltutil.sync_all'](saltenv=self.opts['saltenv']) self.functions.pack['__proxy__'] = self.proxy self.proxy.pack['__salt__'] = self.functions self.proxy.pack['__ret__'] = self.returners self.proxy.pack['__pillar__'] = self.opts['pillar'] # Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__ self.utils = salt.loader.utils(self.opts, proxy=self.proxy) self.proxy.pack['__utils__'] = self.utils # Reload all modules so all dunder variables are injected self.proxy.reload_modules() if ('{0}.init'.format(fq_proxyname) not in self.proxy or '{0}.shutdown'.format(fq_proxyname) not in self.proxy): errmsg = 'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname) + \ 'Check your proxymodule. Salt-proxy aborted.' log.error(errmsg) self._running = False raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg) self.module_executors = self.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])() proxy_init_fn = self.proxy[fq_proxyname + '.init'] proxy_init_fn(self.opts) self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy) # Sync the grains here so the proxy can communicate them to the master self.functions['saltutil.sync_grains'](saltenv='base') self.grains_cache = self.opts['grains'] self.ready = True
saltstack/salt
salt/minion.py
master_event
python
def master_event(type, master=None):
    '''
    Centralized master event function which will return event type based on
    event_map.

    :param type: one of 'connected', 'disconnected', 'failback' or 'alive'
    :param master: optional master id; only used to suffix 'alive' events
    :return: the mapped event tag string, or None for an unknown type
    '''
    event_map = {
        'connected': '__master_connected',
        'disconnected': '__master_disconnected',
        'failback': '__master_failback',
        'alive': '__master_alive',
    }
    tag = event_map.get(type, None)
    if master is None or type != 'alive':
        return tag
    # Alive events are tracked per-master, so append the master identifier.
    return '{0}_{1}'.format(tag, master)
Centralized master event function which will return event type based on event_map
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L403-L415
null
# -*- coding: utf-8 -*- ''' Routines to set up a minion ''' # Import python libs from __future__ import absolute_import, print_function, with_statement, unicode_literals import functools import os import sys import copy import time import types import signal import random import logging import threading import traceback import contextlib import multiprocessing from random import randint, shuffle from stat import S_IMODE import salt.serializers.msgpack from binascii import crc32 # Import Salt Libs # pylint: disable=import-error,no-name-in-module,redefined-builtin from salt.ext import six from salt._compat import ipaddress from salt.utils.network import parse_host_port from salt.ext.six.moves import range from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO import salt.transport.client import salt.defaults.exitcodes from salt.utils.ctx import RequestContext # pylint: enable=no-name-in-module,redefined-builtin import tornado HAS_PSUTIL = False try: import salt.utils.psutil_compat as psutil HAS_PSUTIL = True except ImportError: pass HAS_RESOURCE = False try: import resource HAS_RESOURCE = True except ImportError: pass try: import zmq.utils.monitor HAS_ZMQ_MONITOR = True except ImportError: HAS_ZMQ_MONITOR = False try: import salt.utils.win_functions HAS_WIN_FUNCTIONS = True except ImportError: HAS_WIN_FUNCTIONS = False # pylint: enable=import-error # Import salt libs import salt import salt.client import salt.crypt import salt.loader import salt.beacons import salt.engines import salt.payload import salt.pillar import salt.syspaths import salt.utils.args import salt.utils.context import salt.utils.data import salt.utils.error import salt.utils.event import salt.utils.files import salt.utils.jid import salt.utils.minion import salt.utils.minions import salt.utils.network import salt.utils.platform import salt.utils.process import salt.utils.schedule import salt.utils.ssdp import salt.utils.user import salt.utils.zeromq import 
salt.defaults.events import salt.defaults.exitcodes import salt.cli.daemons import salt.log.setup import salt.utils.dictupdate from salt.config import DEFAULT_MINION_OPTS from salt.defaults import DEFAULT_TARGET_DELIM from salt.utils.debug import enable_sigusr1_handler from salt.utils.event import tagify from salt.utils.odict import OrderedDict from salt.utils.process import (default_signals, SignalHandlingMultiprocessingProcess, ProcessManager) from salt.exceptions import ( CommandExecutionError, CommandNotFoundError, SaltInvocationError, SaltReqTimeoutError, SaltClientError, SaltSystemExit, SaltDaemonNotRunning, SaltException, SaltMasterUnresolvableError ) import tornado.gen # pylint: disable=F0401 import tornado.ioloop # pylint: disable=F0401 log = logging.getLogger(__name__) # To set up a minion: # 1. Read in the configuration # 2. Generate the function mapping dict # 3. Authenticate with the master # 4. Store the AES key # 5. Connect to the publisher # 6. Handle publications def resolve_dns(opts, fallback=True): ''' Resolves the master_ip and master_uri options ''' ret = {} check_dns = True if (opts.get('file_client', 'remote') == 'local' and not opts.get('use_master_when_local', False)): check_dns = False # Since salt.log is imported below, salt.utils.network needs to be imported here as well import salt.utils.network if check_dns is True: try: if opts['master'] == '': raise SaltSystemExit ret['master_ip'] = salt.utils.network.dns_check( opts['master'], int(opts['master_port']), True, opts['ipv6'], attempt_connect=False) except SaltClientError: retry_dns_count = opts.get('retry_dns_count', None) if opts['retry_dns']: while True: if retry_dns_count is not None: if retry_dns_count == 0: raise SaltMasterUnresolvableError retry_dns_count -= 1 import salt.log msg = ('Master hostname: \'{0}\' not found or not responsive. 
' 'Retrying in {1} seconds').format(opts['master'], opts['retry_dns']) if salt.log.setup.is_console_configured(): log.error(msg) else: print('WARNING: {0}'.format(msg)) time.sleep(opts['retry_dns']) try: ret['master_ip'] = salt.utils.network.dns_check( opts['master'], int(opts['master_port']), True, opts['ipv6'], attempt_connect=False) break except SaltClientError: pass else: if fallback: ret['master_ip'] = '127.0.0.1' else: raise except SaltSystemExit: unknown_str = 'unknown address' master = opts.get('master', unknown_str) if master == '': master = unknown_str if opts.get('__role') == 'syndic': err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \ 'Set \'syndic_master\' value in minion config.'.format(master) else: err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \ 'Set \'master\' value in minion config.'.format(master) log.error(err) raise SaltSystemExit(code=42, msg=err) else: ret['master_ip'] = '127.0.0.1' if 'master_ip' in ret and 'master_ip' in opts: if ret['master_ip'] != opts['master_ip']: log.warning( 'Master ip address changed from %s to %s', opts['master_ip'], ret['master_ip'] ) if opts['source_interface_name']: log.trace('Custom source interface required: %s', opts['source_interface_name']) interfaces = salt.utils.network.interfaces() log.trace('The following interfaces are available on this Minion:') log.trace(interfaces) if opts['source_interface_name'] in interfaces: if interfaces[opts['source_interface_name']]['up']: addrs = interfaces[opts['source_interface_name']]['inet'] if not opts['ipv6'] else\ interfaces[opts['source_interface_name']]['inet6'] ret['source_ip'] = addrs[0]['address'] log.debug('Using %s as source IP address', ret['source_ip']) else: log.warning('The interface %s is down so it cannot be used as source to connect to the Master', opts['source_interface_name']) else: log.warning('%s is not a valid interface. 
Ignoring.', opts['source_interface_name']) elif opts['source_address']: ret['source_ip'] = salt.utils.network.dns_check( opts['source_address'], int(opts['source_ret_port']), True, opts['ipv6'], attempt_connect=False) log.debug('Using %s as source IP address', ret['source_ip']) if opts['source_ret_port']: ret['source_ret_port'] = int(opts['source_ret_port']) log.debug('Using %d as source port for the ret server', ret['source_ret_port']) if opts['source_publish_port']: ret['source_publish_port'] = int(opts['source_publish_port']) log.debug('Using %d as source port for the master pub', ret['source_publish_port']) ret['master_uri'] = 'tcp://{ip}:{port}'.format( ip=ret['master_ip'], port=opts['master_port']) log.debug('Master URI: %s', ret['master_uri']) return ret def prep_ip_port(opts): ''' parse host:port values from opts['master'] and return valid: master: ip address or hostname as a string master_port: (optional) master returner port as integer e.g.: - master: 'localhost:1234' -> {'master': 'localhost', 'master_port': 1234} - master: '127.0.0.1:1234' -> {'master': '127.0.0.1', 'master_port' :1234} - master: '[::1]:1234' -> {'master': '::1', 'master_port': 1234} - master: 'fe80::a00:27ff:fedc:ba98' -> {'master': 'fe80::a00:27ff:fedc:ba98'} ''' ret = {} # Use given master IP if "ip_only" is set or if master_ip is an ipv6 address without # a port specified. The is_ipv6 check returns False if brackets are used in the IP # definition such as master: '[::1]:1234'. if opts['master_uri_format'] == 'ip_only': ret['master'] = ipaddress.ip_address(opts['master']) else: host, port = parse_host_port(opts['master']) ret = {'master': host} if port: ret.update({'master_port': port}) return ret def get_proc_dir(cachedir, **kwargs): ''' Given the cache directory, return the directory that process data is stored in, creating it if it doesn't exist. The following optional Keyword Arguments are handled: mode: which is anything os.makedir would accept as mode. 
uid: the uid to set, if not set, or it is None or -1 no changes are made. Same applies if the directory is already owned by this uid. Must be int. Works only on unix/unix like systems. gid: the gid to set, if not set, or it is None or -1 no changes are made. Same applies if the directory is already owned by this gid. Must be int. Works only on unix/unix like systems. ''' fn_ = os.path.join(cachedir, 'proc') mode = kwargs.pop('mode', None) if mode is None: mode = {} else: mode = {'mode': mode} if not os.path.isdir(fn_): # proc_dir is not present, create it with mode settings os.makedirs(fn_, **mode) d_stat = os.stat(fn_) # if mode is not an empty dict then we have an explicit # dir mode. So lets check if mode needs to be changed. if mode: mode_part = S_IMODE(d_stat.st_mode) if mode_part != mode['mode']: os.chmod(fn_, (d_stat.st_mode ^ mode_part) | mode['mode']) if hasattr(os, 'chown'): # only on unix/unix like systems uid = kwargs.pop('uid', -1) gid = kwargs.pop('gid', -1) # if uid and gid are both -1 then go ahead with # no changes at all if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \ [i for i in (uid, gid) if i != -1]: os.chown(fn_, uid, gid) return fn_ def load_args_and_kwargs(func, args, data=None, ignore_invalid=False): ''' Detect the args and kwargs that need to be passed to a function call, and check them against what was passed. ''' argspec = salt.utils.args.get_function_argspec(func) _args = [] _kwargs = {} invalid_kwargs = [] for arg in args: if isinstance(arg, dict) and arg.pop('__kwarg__', False) is True: # if the arg is a dict with __kwarg__ == True, then its a kwarg for key, val in six.iteritems(arg): if argspec.keywords or key in argspec.args: # Function supports **kwargs or is a positional argument to # the function. _kwargs[key] = val else: # **kwargs not in argspec and parsed argument name not in # list of positional arguments. This keyword argument is # invalid. 
invalid_kwargs.append('{0}={1}'.format(key, val)) continue else: string_kwarg = salt.utils.args.parse_input([arg], condition=False)[1] # pylint: disable=W0632 if string_kwarg: if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args: # Function supports **kwargs or is a positional argument to # the function. _kwargs.update(string_kwarg) else: # **kwargs not in argspec and parsed argument name not in # list of positional arguments. This keyword argument is # invalid. for key, val in six.iteritems(string_kwarg): invalid_kwargs.append('{0}={1}'.format(key, val)) else: _args.append(arg) if invalid_kwargs and not ignore_invalid: salt.utils.args.invalid_kwargs(invalid_kwargs) if argspec.keywords and isinstance(data, dict): # this function accepts **kwargs, pack in the publish data for key, val in six.iteritems(data): _kwargs['__pub_{0}'.format(key)] = val return _args, _kwargs def eval_master_func(opts): ''' Evaluate master function if master type is 'func' and save it result in opts['master'] ''' if '__master_func_evaluated' not in opts: # split module and function and try loading the module mod_fun = opts['master'] mod, fun = mod_fun.split('.') try: master_mod = salt.loader.raw_mod(opts, mod, fun) if not master_mod: raise KeyError # we take whatever the module returns as master address opts['master'] = master_mod[mod_fun]() # Check for valid types if not isinstance(opts['master'], (six.string_types, list)): raise TypeError opts['__master_func_evaluated'] = True except KeyError: log.error('Failed to load module %s', mod_fun) sys.exit(salt.defaults.exitcodes.EX_GENERIC) except TypeError: log.error('%s returned from %s is not a string', opts['master'], mod_fun) sys.exit(salt.defaults.exitcodes.EX_GENERIC) log.info('Evaluated master from module: %s', mod_fun) def service_name(): ''' Return the proper service name based on platform ''' return 'salt_minion' if 'bsd' in sys.platform else 'salt-minion' class MinionBase(object): def __init__(self, opts): 
self.opts = opts @staticmethod def process_schedule(minion, loop_interval): try: if hasattr(minion, 'schedule'): minion.schedule.eval() else: log.error('Minion scheduler not initialized. Scheduled jobs will not be run.') return # Check if scheduler requires lower loop interval than # the loop_interval setting if minion.schedule.loop_interval < loop_interval: loop_interval = minion.schedule.loop_interval log.debug( 'Overriding loop_interval because of scheduled jobs.' ) except Exception as exc: log.error('Exception %s occurred in scheduled job', exc) return loop_interval def process_beacons(self, functions): ''' Evaluate all of the configured beacons, grab the config again in case the pillar or grains changed ''' if 'config.merge' in functions: b_conf = functions['config.merge']('beacons', self.opts['beacons'], omit_opts=True) if b_conf: return self.beacons.process(b_conf, self.opts['grains']) # pylint: disable=no-member return [] @tornado.gen.coroutine def eval_master(self, opts, timeout=60, safe=True, failed=False, failback=False): ''' Evaluates and returns a tuple of the current master address and the pub_channel. In standard mode, just creates a pub_channel with the given master address. With master_type=func evaluates the current master address from the given module and then creates a pub_channel. With master_type=failover takes the list of masters and loops through them. The first one that allows the minion to create a pub_channel is then returned. If this function is called outside the minions initialization phase (for example from the minions main event-loop when a master connection loss was detected), 'failed' should be set to True. The current (possibly failed) master will then be removed from the list of masters. 
''' # return early if we are not connecting to a master if opts['master_type'] == 'disable': log.warning('Master is set to disable, skipping connection') self.connected = False raise tornado.gen.Return((None, None)) # Run masters discovery over SSDP. This may modify the whole configuration, # depending of the networking and sets of masters. # if we are using multimaster, discovery can only happen at start time # because MinionManager handles it. by eval_master time the minion doesn't # know about other siblings currently running if isinstance(self.opts['discovery'], dict) and not self.opts['discovery'].get('multimaster'): self._discover_masters() # check if master_type was altered from its default if opts['master_type'] != 'str' and opts['__role'] != 'syndic': # check for a valid keyword if opts['master_type'] == 'func': eval_master_func(opts) # if failover or distributed is set, master has to be of type list elif opts['master_type'] in ('failover', 'distributed'): if isinstance(opts['master'], list): log.info( 'Got list of available master addresses: %s', opts['master'] ) if opts['master_type'] == 'distributed': master_len = len(opts['master']) if master_len > 1: secondary_masters = opts['master'][1:] master_idx = crc32(opts['id']) % master_len try: preferred_masters = opts['master'] preferred_masters[0] = opts['master'][master_idx] preferred_masters[1:] = [m for m in opts['master'] if m != preferred_masters[0]] opts['master'] = preferred_masters log.info('Distributed to the master at \'%s\'.', opts['master'][0]) except (KeyError, AttributeError, TypeError): log.warning('Failed to distribute to a specific master.') else: log.warning('master_type = distributed needs more than 1 master.') if opts['master_shuffle']: log.warning( 'Use of \'master_shuffle\' detected. \'master_shuffle\' is deprecated in favor ' 'of \'random_master\'. Please update your minion config file.' 
) opts['random_master'] = opts['master_shuffle'] opts['auth_tries'] = 0 if opts['master_failback'] and opts['master_failback_interval'] == 0: opts['master_failback_interval'] = opts['master_alive_interval'] # if opts['master'] is a str and we have never created opts['master_list'] elif isinstance(opts['master'], six.string_types) and ('master_list' not in opts): # We have a string, but a list was what was intended. Convert. # See issue 23611 for details opts['master'] = [opts['master']] elif opts['__role'] == 'syndic': log.info('Syndic setting master_syndic to \'%s\'', opts['master']) # if failed=True, the minion was previously connected # we're probably called from the minions main-event-loop # because a master connection loss was detected. remove # the possibly failed master from the list of masters. elif failed: if failback: # failback list of masters to original config opts['master'] = opts['master_list'] else: log.info( 'Moving possibly failed master %s to the end of ' 'the list of masters', opts['master'] ) if opts['master'] in opts['local_masters']: # create new list of master with the possibly failed # one moved to the end failed_master = opts['master'] opts['master'] = [x for x in opts['local_masters'] if opts['master'] != x] opts['master'].append(failed_master) else: opts['master'] = opts['master_list'] else: msg = ('master_type set to \'failover\' but \'master\' ' 'is not of type list but of type ' '{0}'.format(type(opts['master']))) log.error(msg) sys.exit(salt.defaults.exitcodes.EX_GENERIC) # If failover is set, minion have to failover on DNS errors instead of retry DNS resolve. # See issue 21082 for details if opts['retry_dns'] and opts['master_type'] == 'failover': msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. 
' 'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.') log.critical(msg) opts['retry_dns'] = 0 else: msg = ('Invalid keyword \'{0}\' for variable ' '\'master_type\''.format(opts['master_type'])) log.error(msg) sys.exit(salt.defaults.exitcodes.EX_GENERIC) # FIXME: if SMinion don't define io_loop, it can't switch master see #29088 # Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop # (The channel factories will set a default if the kwarg isn't passed) factory_kwargs = {'timeout': timeout, 'safe': safe} if getattr(self, 'io_loop', None): factory_kwargs['io_loop'] = self.io_loop # pylint: disable=no-member tries = opts.get('master_tries', 1) attempts = 0 # if we have a list of masters, loop through them and be # happy with the first one that allows us to connect if isinstance(opts['master'], list): conn = False last_exc = None opts['master_uri_list'] = [] opts['local_masters'] = copy.copy(opts['master']) # shuffle the masters and then loop through them if opts['random_master']: # master_failback is only used when master_type is set to failover if opts['master_type'] == 'failover' and opts['master_failback']: secondary_masters = opts['local_masters'][1:] shuffle(secondary_masters) opts['local_masters'][1:] = secondary_masters else: shuffle(opts['local_masters']) # This sits outside of the connection loop below because it needs to set # up a list of master URIs regardless of which masters are available # to connect _to_. This is primarily used for masterless mode, when # we need a list of master URIs to fire calls back to. for master in opts['local_masters']: opts['master'] = master opts.update(prep_ip_port(opts)) opts['master_uri_list'].append(resolve_dns(opts)['master_uri']) while True: if attempts != 0: # Give up a little time between connection attempts # to allow the IOLoop to run any other scheduled tasks. 
yield tornado.gen.sleep(opts['acceptance_wait_time']) attempts += 1 if tries > 0: log.debug( 'Connecting to master. Attempt %s of %s', attempts, tries ) else: log.debug( 'Connecting to master. Attempt %s (infinite attempts)', attempts ) for master in opts['local_masters']: opts['master'] = master opts.update(prep_ip_port(opts)) opts.update(resolve_dns(opts)) # on first run, update self.opts with the whole master list # to enable a minion to re-use old masters if they get fixed if 'master_list' not in opts: opts['master_list'] = copy.copy(opts['local_masters']) self.opts = opts pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs) try: yield pub_channel.connect() conn = True break except SaltClientError as exc: last_exc = exc if exc.strerror.startswith('Could not access'): msg = ( 'Failed to initiate connection with Master ' '%s: check ownership/permissions. Error ' 'message: %s', opts['master'], exc ) else: msg = ('Master %s could not be reached, trying next ' 'next master (if any)', opts['master']) log.info(msg) continue if not conn: if attempts == tries: # Exhausted all attempts. Return exception. self.connected = False self.opts['master'] = copy.copy(self.opts['local_masters']) log.error( 'No master could be reached or all masters ' 'denied the minion\'s connection attempt.' ) # If the code reaches this point, 'last_exc' # should already be set. raise last_exc # pylint: disable=E0702 else: self.tok = pub_channel.auth.gen_token(b'salt') self.connected = True raise tornado.gen.Return((opts['master'], pub_channel)) # single master sign in else: if opts['random_master']: log.warning('random_master is True but there is only one master specified. Ignoring.') while True: if attempts != 0: # Give up a little time between connection attempts # to allow the IOLoop to run any other scheduled tasks. yield tornado.gen.sleep(opts['acceptance_wait_time']) attempts += 1 if tries > 0: log.debug( 'Connecting to master. 
Attempt %s of %s', attempts, tries ) else: log.debug( 'Connecting to master. Attempt %s (infinite attempts)', attempts ) opts.update(prep_ip_port(opts)) opts.update(resolve_dns(opts)) try: if self.opts['transport'] == 'detect': self.opts['detect_mode'] = True for trans in ('zeromq', 'tcp'): if trans == 'zeromq' and not zmq: continue self.opts['transport'] = trans pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs) yield pub_channel.connect() if not pub_channel.auth.authenticated: continue del self.opts['detect_mode'] break else: pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs) yield pub_channel.connect() self.tok = pub_channel.auth.gen_token(b'salt') self.connected = True raise tornado.gen.Return((opts['master'], pub_channel)) except SaltClientError as exc: if attempts == tries: # Exhausted all attempts. Return exception. self.connected = False raise exc def _discover_masters(self): ''' Discover master(s) and decide where to connect, if SSDP is around. This modifies the configuration on the fly. :return: ''' if self.opts['master'] == DEFAULT_MINION_OPTS['master'] and self.opts['discovery'] is not False: master_discovery_client = salt.utils.ssdp.SSDPDiscoveryClient() masters = {} for att in range(self.opts['discovery'].get('attempts', 3)): try: att += 1 log.info('Attempting %s time(s) to discover masters', att) masters.update(master_discovery_client.discover()) if not masters: time.sleep(self.opts['discovery'].get('pause', 5)) else: break except Exception as err: log.error('SSDP discovery failure: %s', err) break if masters: policy = self.opts.get('discovery', {}).get('match', 'any') if policy not in ['any', 'all']: log.error('SSDP configuration matcher failure: unknown value "%s". 
' 'Should be "any" or "all"', policy) return mapping = self.opts['discovery'].get('mapping', {}) discovered = [] for addr, mappings in masters.items(): for proto_data in mappings: cnt = len([key for key, value in mapping.items() if proto_data.get('mapping', {}).get(key) == value]) if policy == 'any' and bool(cnt) or cnt == len(mapping): if self.opts['discovery'].get('multimaster'): discovered.append(proto_data['master']) else: self.opts['master'] = proto_data['master'] return self.opts['master'] = discovered def _return_retry_timer(self): ''' Based on the minion configuration, either return a randomized timer or just return the value of the return_retry_timer. ''' msg = 'Minion return retry timer set to %s seconds' if self.opts.get('return_retry_timer_max'): try: random_retry = randint(self.opts['return_retry_timer'], self.opts['return_retry_timer_max']) retry_msg = msg % random_retry log.debug('%s (randomized)', msg % random_retry) return random_retry except ValueError: # Catch wiseguys using negative integers here log.error( 'Invalid value (return_retry_timer: %s or ' 'return_retry_timer_max: %s). Both must be positive ' 'integers.', self.opts['return_retry_timer'], self.opts['return_retry_timer_max'], ) log.debug(msg, DEFAULT_MINION_OPTS['return_retry_timer']) return DEFAULT_MINION_OPTS['return_retry_timer'] else: log.debug(msg, self.opts.get('return_retry_timer')) return self.opts.get('return_retry_timer') class SMinion(MinionBase): ''' Create an object that has loaded all of the minion module functions, grains, modules, returners etc. The SMinion allows developers to generate all of the salt minion functions and present them with these functions for general use. 
''' def __init__(self, opts): # Late setup of the opts grains, so we can log from the grains module import salt.loader opts['grains'] = salt.loader.grains(opts) super(SMinion, self).__init__(opts) # run ssdp discovery if necessary self._discover_masters() # Clean out the proc directory (default /var/cache/salt/minion/proc) if (self.opts.get('file_client', 'remote') == 'remote' or self.opts.get('use_master_when_local', False)): install_zmq() io_loop = ZMQDefaultLoop.current() io_loop.run_sync( lambda: self.eval_master(self.opts, failed=True) ) self.gen_modules(initial_load=True) # If configured, cache pillar data on the minion if self.opts['file_client'] == 'remote' and self.opts.get('minion_pillar_cache', False): import salt.utils.yaml pdir = os.path.join(self.opts['cachedir'], 'pillar') if not os.path.isdir(pdir): os.makedirs(pdir, 0o700) ptop = os.path.join(pdir, 'top.sls') if self.opts['saltenv'] is not None: penv = self.opts['saltenv'] else: penv = 'base' cache_top = {penv: {self.opts['id']: ['cache']}} with salt.utils.files.fopen(ptop, 'wb') as fp_: salt.utils.yaml.safe_dump(cache_top, fp_) os.chmod(ptop, 0o600) cache_sls = os.path.join(pdir, 'cache.sls') with salt.utils.files.fopen(cache_sls, 'wb') as fp_: salt.utils.yaml.safe_dump(self.opts['pillar'], fp_) os.chmod(cache_sls, 0o600) def gen_modules(self, initial_load=False): ''' Tell the minion to reload the execution modules CLI Example: .. 
code-block:: bash salt '*' sys.reload_modules ''' self.opts['pillar'] = salt.pillar.get_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ).compile_pillar() self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils) self.serializers = salt.loader.serializers(self.opts) self.returners = salt.loader.returners(self.opts, self.functions) self.proxy = salt.loader.proxy(self.opts, self.functions, self.returners, None) # TODO: remove self.function_errors = {} # Keep the funcs clean self.states = salt.loader.states(self.opts, self.functions, self.utils, self.serializers) self.rend = salt.loader.render(self.opts, self.functions) # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.functions['sys.reload_modules'] = self.gen_modules self.executors = salt.loader.executors(self.opts) class MasterMinion(object): ''' Create a fully loaded minion function object for generic use on the master. What makes this class different is that the pillar is omitted, otherwise everything else is loaded cleanly. ''' def __init__( self, opts, returners=True, states=True, rend=True, matcher=True, whitelist=None, ignore_config_errors=True): self.opts = salt.config.minion_config( opts['conf_file'], ignore_config_errors=ignore_config_errors, role='master' ) self.opts.update(opts) self.whitelist = whitelist self.opts['grains'] = salt.loader.grains(opts) self.opts['pillar'] = {} self.mk_returners = returners self.mk_states = states self.mk_rend = rend self.mk_matcher = matcher self.gen_modules(initial_load=True) def gen_modules(self, initial_load=False): ''' Tell the minion to reload the execution modules CLI Example: .. 
code-block:: bash salt '*' sys.reload_modules ''' self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods( self.opts, utils=self.utils, whitelist=self.whitelist, initial_load=initial_load) self.serializers = salt.loader.serializers(self.opts) if self.mk_returners: self.returners = salt.loader.returners(self.opts, self.functions) if self.mk_states: self.states = salt.loader.states(self.opts, self.functions, self.utils, self.serializers) if self.mk_rend: self.rend = salt.loader.render(self.opts, self.functions) if self.mk_matcher: self.matchers = salt.loader.matchers(self.opts) self.functions['sys.reload_modules'] = self.gen_modules class MinionManager(MinionBase): ''' Create a multi minion interface, this creates as many minions as are defined in the master option and binds each minion object to a respective master. ''' def __init__(self, opts): super(MinionManager, self).__init__(opts) self.auth_wait = self.opts['acceptance_wait_time'] self.max_auth_wait = self.opts['acceptance_wait_time_max'] self.minions = [] self.jid_queue = [] install_zmq() self.io_loop = ZMQDefaultLoop.current() self.process_manager = ProcessManager(name='MultiMinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # Tornado backward compat def __del__(self): self.destroy() def _bind(self): # start up the event publisher, so we can see events during startup self.event_publisher = salt.utils.event.AsyncEventPublisher( self.opts, io_loop=self.io_loop, ) self.event = salt.utils.event.get_event('minion', opts=self.opts, io_loop=self.io_loop) self.event.subscribe('') self.event.set_event_handler(self.handle_event) @tornado.gen.coroutine def handle_event(self, package): yield [minion.handle_event(package) for minion in self.minions] def _create_minion_object(self, opts, timeout, safe, io_loop=None, loaded_base_name=None, jid_queue=None): ''' Helper function to return the correct type of object ''' return Minion(opts, timeout, 
                      safe,
                      io_loop=io_loop,
                      loaded_base_name=loaded_base_name,
                      jid_queue=jid_queue)

    def _check_minions(self):
        '''
        Check the size of self.minions and raise an error if it's empty
        '''
        if not self.minions:
            # Invoked via call_later from _spawn_minions: if no minion managed
            # to connect within the timeout, surface that in the log.
            err = ('Minion unable to successfully connect to '
                   'a Salt Master.')
            log.error(err)

    def _spawn_minions(self, timeout=60):
        '''
        Spawn all the coroutines which will sign in to masters

        :param int timeout: seconds after which _check_minions logs an error
            if no master connection has been established yet
        '''
        # Run masters discovery over SSDP. This may modify the whole configuration,
        # depending of the networking and sets of masters. If match is 'any' we let
        # eval_master handle the discovery instead so disconnections can also handle
        # discovery
        if isinstance(self.opts['discovery'], dict) and self.opts['discovery'].get('multimaster'):
            self._discover_masters()

        masters = self.opts['master']
        # failover/distributed master types (or a single, non-list master) are
        # handled by one minion object; otherwise one minion per listed master
        if (self.opts['master_type'] in ('failover', 'distributed')) or not isinstance(self.opts['master'], list):
            masters = [masters]

        for master in masters:
            s_opts = copy.deepcopy(self.opts)
            s_opts['master'] = master
            s_opts['multimaster'] = True
            minion = self._create_minion_object(s_opts,
                                                s_opts['auth_timeout'],
                                                False,
                                                io_loop=self.io_loop,
                                                loaded_base_name='salt.loader.{0}'.format(s_opts['master']),
                                                jid_queue=self.jid_queue)
            # connect asynchronously; failures are retried inside _connect_minion
            self.io_loop.spawn_callback(self._connect_minion, minion)
        self.io_loop.call_later(timeout, self._check_minions)

    @tornado.gen.coroutine
    def _connect_minion(self, minion):
        '''
        Create a minion, and asynchronously connect it to a master
        '''
        auth_wait = minion.opts['acceptance_wait_time']
        failed = False
        while True:
            if failed:
                # back off between reconnect attempts, growing the wait by
                # acceptance_wait_time up to acceptance_wait_time_max
                if auth_wait < self.max_auth_wait:
                    auth_wait += self.auth_wait
                log.debug(
                    "sleeping before reconnect attempt to %s [%d/%d]",
                    minion.opts['master'],
                    auth_wait,
                    self.max_auth_wait,
                )
                yield tornado.gen.sleep(auth_wait)  # TODO: log?
try: if minion.opts.get('beacons_before_connect', False): minion.setup_beacons(before_connect=True) if minion.opts.get('scheduler_before_connect', False): minion.setup_scheduler(before_connect=True) yield minion.connect_master(failed=failed) minion.tune_in(start=False) self.minions.append(minion) break except SaltClientError as exc: failed = True log.error( 'Error while bringing up minion for multi-master. Is ' 'master at %s responding?', minion.opts['master'] ) except SaltMasterUnresolvableError: err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \ 'Set \'master\' value in minion config.'.format(minion.opts['master']) log.error(err) break except Exception as e: failed = True log.critical( 'Unexpected error while connecting to %s', minion.opts['master'], exc_info=True ) # Multi Master Tune In def tune_in(self): ''' Bind to the masters This loop will attempt to create connections to masters it hasn't connected to yet, but once the initial connection is made it is up to ZMQ to do the reconnect (don't know of an API to get the state here in salt) ''' self._bind() # Fire off all the minion coroutines self._spawn_minions() # serve forever! 
self.io_loop.start() @property def restart(self): for minion in self.minions: if minion.restart: return True return False def stop(self, signum): for minion in self.minions: minion.process_manager.stop_restarting() minion.process_manager.send_signal_to_processes(signum) # kill any remaining processes minion.process_manager.kill_children() minion.destroy() def destroy(self): for minion in self.minions: minion.destroy() class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. 
We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, 
timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. 
''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ 
                    master_event(type='failback'): {
                        'function': 'status.ping_master',
                        'seconds': self.opts['master_failback_interval'],
                        'jid_include': True,
                        'maxrunning': 1,
                        'return_job': False,
                        # ping the first (preferred) master in master_list so
                        # we can fail back to it once it is reachable again
                        'kwargs': {'master': self.opts['master_list'][0]}
                    }
                }, persist=True)
            else:
                self.schedule.delete_job(master_event(type='failback'), persist=True)
        else:
            # alive/failback polling not applicable (tcp transport, interval
            # <= 0, or not connected) -- drop any previously persisted jobs
            self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
            self.schedule.delete_job(master_event(type='failback'), persist=True)

    def _prep_mod_opts(self):
        '''
        Returns a copy of the opts with key bits stripped out
        '''
        mod_opts = {}
        for key, val in six.iteritems(self.opts):
            # the logger object is not meaningful inside loaded modules
            if key == 'logger':
                continue
            mod_opts[key] = val
        return mod_opts

    def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None):
        '''
        Return the functions and the returners loaded up from the loader
        module

        :param bool force_refresh: forwarded to salt.loader.grains to force a
            grains refresh when grains are (re)loaded here
        :param bool notify: forwarded to salt.loader.minion_mods
        :param grains: pre-loaded grains; when None, grains are reloaded
        :param dict opts: alternate opts to load against; when given, they
            replace ``self.opts`` once loading succeeds
        :return: tuple of (functions, returners, errors, executors)
        '''
        opt_in = True
        if not opts:
            opts = self.opts
            opt_in = False

        # if this is a *nix system AND modules_max_memory is set, lets enforce
        # a memory limit on module imports
        # this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
        modules_max_memory = False
        if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
            log.debug(
                'modules_max_memory set, enforcing a maximum of %s',
                opts['modules_max_memory']
            )
            modules_max_memory = True
            # cap the address space at current usage plus the configured head
            # room; restored below after the loaders have run
            old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
            rss, vms = psutil.Process(os.getpid()).memory_info()[:2]
            mem_limit = rss + vms + opts['modules_max_memory']
            resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
        elif opts.get('modules_max_memory', -1) > 0:
            if not HAS_PSUTIL:
                log.error('Unable to enforce modules_max_memory because psutil is missing')
            if not HAS_RESOURCE:
                log.error('Unable to enforce modules_max_memory because resource is missing')

        # This might be a proxy minion
        if hasattr(self, 'proxy'):
            proxy = self.proxy
        else:
            proxy = None

        if grains is None:
            opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy)
        self.utils = salt.loader.utils(opts, proxy=proxy)

        if opts.get('multimaster', False):
            s_opts = copy.deepcopy(opts)
            functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy,
                                                loaded_base_name=self.loaded_base_name, notify=notify)
        else:
            functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy)
        returners = salt.loader.returners(opts, functions, proxy=proxy)
        errors = {}
        if '_errors' in functions:
            errors = functions['_errors']
            functions.pop('_errors')

        # we're done, reset the limits!
        if modules_max_memory is True:
            resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)

        executors = salt.loader.executors(opts, functions, proxy=proxy)

        if opt_in:
            self.opts = opts

        return functions, returners, errors, executors

    def _send_req_sync(self, load, timeout):
        '''
        Send a request to the master over a synchronous ReqChannel and return
        the reply.  Signs the payload first when minion_sign_messages is set.
        '''
        if self.opts['minion_sign_messages']:
            log.trace('Signing event to be published onto the bus.')
            minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
            sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
            load['sig'] = sig

        channel = salt.transport.client.ReqChannel.factory(self.opts)
        try:
            return channel.send(load, timeout=timeout)
        finally:
            # always release the channel, even when send() raises
            channel.close()

    @tornado.gen.coroutine
    def _send_req_async(self, load, timeout):
        '''
        Asynchronous counterpart of _send_req_sync; yields the master's reply
        via tornado.gen.Return.
        '''
        if self.opts['minion_sign_messages']:
            log.trace('Signing event to be published onto the bus.')
            minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
            sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
            load['sig'] = sig

        channel = salt.transport.client.AsyncReqChannel.factory(self.opts)
        try:
            ret = yield channel.send(load, timeout=timeout)
            raise tornado.gen.Return(ret)
        finally:
            channel.close()

    def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True,
                     timeout_handler=None):
        '''
        Fire an event on the master, or drop message if unable to send.
''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return if sync: try: self._send_req_sync(load, timeout) except salt.exceptions.SaltReqTimeoutError: log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return False except Exception: log.info('fire_master failed: %s', traceback.format_exc()) return False else: if timeout_handler is None: def handle_timeout(*_): log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return True timeout_handler = handle_timeout with tornado.stack_context.ExceptionStackContext(timeout_handler): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # Ensure payload is unicode. Disregard failure to decode binary blobs. 
        if six.PY2:
            # decode bytes payloads to unicode; keep=True leaves undecodable
            # binary blobs untouched
            data = salt.utils.data.decode(data, keep=True)
        if 'user' in data:
            log.info(
                'User %s Executing command %s with jid %s',
                data['user'], data['fun'], data['jid']
            )
        else:
            log.info(
                'Executing command %s with jid %s',
                data['fun'], data['jid']
            )
        log.debug('Command details %s', data)

        # Don't duplicate jobs
        log.trace('Started JIDs: %s', self.jid_queue)
        if self.jid_queue is not None:
            if data['jid'] in self.jid_queue:
                # this jid was already seen (e.g. published by more than one
                # master) -- drop the duplicate
                return
            else:
                self.jid_queue.append(data['jid'])
                # bound the dedupe queue at minion_jid_queue_hwm entries
                if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']:
                    self.jid_queue.pop(0)

        if isinstance(data['fun'], six.string_types):
            if data['fun'] == 'sys.reload_modules':
                # reload in-process so the scheduler sees the fresh modules too
                self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
                self.schedule.functions = self.functions
                self.schedule.returners = self.returners

        # throttle job spawning when process_count_max is configured (> 0)
        process_count_max = self.opts.get('process_count_max')
        process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs')
        if process_count_max > 0:
            process_count = len(salt.utils.minion.running(self.opts))
            while process_count >= process_count_max:
                log.warning('Maximum number of processes (%s) reached while '
                            'executing jid %s, waiting %s seconds...',
                            process_count_max,
                            data['jid'],
                            process_count_max_sleep_secs)
                yield tornado.gen.sleep(process_count_max_sleep_secs)
                process_count = len(salt.utils.minion.running(self.opts))

        # We stash an instance references to allow for the socket
        # communication in Windows. You can't pickle functions, and thus
        # python needs to be able to reconstruct the reference on the other
        # side.
instance = self multiprocessing_enabled = self.opts.get('multiprocessing', True) if multiprocessing_enabled: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None with default_signals(signal.SIGINT, signal.SIGTERM): process = SignalHandlingMultiprocessingProcess( target=self._target, args=(instance, self.opts, data, self.connected) ) else: process = threading.Thread( target=self._target, args=(instance, self.opts, data, self.connected), name=data['jid'] ) if multiprocessing_enabled: with default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers process.start() else: process.start() # TODO: remove the windows specific check? if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not 
hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or 
allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
try: func_result = all(return_data.get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret['retcode'] = retcode ret['success'] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = 'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( 'A command in \'%s\' had a problem: %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( 'Problem executing \'%s\': %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing \'{0}\': {1}'.format( function_name, exc ) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format( function_name, exc, func.__doc__ or '' ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string(function_name) ret['return'] = docs else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = 
function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: \'{0}\''.format( minion_instance.function_errors[mod_name] ) ret['success'] = False ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. Ignoring.') if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) # Add default returners from minion config # Should have been coverted to comma-delimited string already if isinstance(opts.get('return'), six.string_types): if data['ret']: data['ret'] = ','.join((data['ret'], opts['return'])) else: data['ret'] = opts['return'] log.debug('minion return: %s', ret) # TODO: make a list? Seems odd to split it this late :/ if data['ret'] and isinstance(data['ret'], six.string_types): if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: returner_str = '{0}.returner'.format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string(returner_str) log.error( 'Returner %s could not be loaded: %s', returner_str, returner_err ) except Exception as exc: log.exception( 'The return failed for job %s: %s', data['jid'], exc ) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. 
''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID %s', sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) multifunc_ordered = opts.get('multifunc_ordered', False) num_funcs = len(data['fun']) if multifunc_ordered: ret = { 'return': [None] * num_funcs, 'retcode': [None] * num_funcs, 'success': [False] * num_funcs } else: ret = { 'return': {}, 'retcode': {}, 'success': {} } for ind in range(0, num_funcs): if not multifunc_ordered: ret['success'][data['fun'][ind]] = False try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True elif minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. 
Only ' 'saltutil.refresh_pillar allowed in blackout mode.') func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) minion_instance.functions.pack['__context__']['retcode'] = 0 key = ind if multifunc_ordered else data['fun'][ind] ret['return'][key] = func(*args, **kwargs) retcode = minion_instance.functions.pack['__context__'].get( 'retcode', 0 ) if retcode == 0: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. try: func_result = all(ret['return'][key].get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = 1 ret['retcode'][key] = retcode ret['success'][key] = retcode == 0 except Exception as exc: trb = traceback.format_exc() log.warning('The minion function caused an exception: %s', exc) if multifunc_ordered: ret['return'][ind] = trb else: ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job %s: %s', data['jid'], exc ) def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file 
is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled if ret['jid'] == 'req': ret['jid'] = salt.utils.jid.gen_jid(self.opts) salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) if not self.opts['pub_ret']: return '' def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. 
If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' if not isinstance(rets, list): rets = [rets] jids = {} for ret in rets: jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) load = jids.setdefault(jid, {}) if ret_cmd == '_syndic_return': if not load: load.update({'id': self.opts['id'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__'), 'return': {}}) if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load.update({'id': self.opts['id']}) for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. 
This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled salt.utils.minion.cache_jobs(self.opts, load['jid'], ret) load = {'cmd': ret_cmd, 'load': list(six.itervalues(jids))} def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' 
) else: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data) def _refresh_grains_watcher(self, refresh_interval_in_minutes): ''' Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: :return: None ''' if '__update_grains' not in self.opts.get('schedule', {}): if 'schedule' not in self.opts: self.opts['schedule'] = {} self.opts['schedule'].update({ '__update_grains': { 'function': 'event.fire', 'args': [{}, 'grains_refresh'], 'minutes': refresh_interval_in_minutes } }) def _fire_master_minion_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to False in Sodium release. self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'minion_start' ) # send name spaced event self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'minion'), ) def module_refresh(self, force_refresh=False, notify=False): ''' Refresh the functions and returners. ''' log.debug('Refreshing modules. Notify=%s', notify) self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify) self.schedule.functions = self.functions self.schedule.returners = self.returners def beacons_refresh(self): ''' Refresh the functions and returners. 
        '''
        log.debug('Refreshing beacons.')
        self.beacons = salt.beacons.Beacon(self.opts, self.functions)

    def matchers_refresh(self):
        '''
        Refresh the matchers, reloading them through the loader so newly
        synced matcher modules become available.
        '''
        log.debug('Refreshing matchers.')
        self.matchers = salt.loader.matchers(self.opts)

    # TODO: only allow one future in flight at a time?
    @tornado.gen.coroutine
    def pillar_refresh(self, force_refresh=False, notify=False):
        '''
        Recompile the in-memory pillar from the master, then refresh the
        modules, matchers and beacons so they pick up the new pillar data.

        :param bool force_refresh: passed through to ``module_refresh()``
        :param bool notify: if True, fire a local "pillar complete" event
            once the new pillar has been compiled
        '''
        if self.connected:
            log.debug('Refreshing pillar. Notify: %s', notify)
            async_pillar = salt.pillar.get_async_pillar(
                self.opts,
                self.opts['grains'],
                self.opts['id'],
                self.opts['saltenv'],
                pillarenv=self.opts.get('pillarenv'),
            )
            try:
                self.opts['pillar'] = yield async_pillar.compile_pillar()
                if notify:
                    evt = salt.utils.event.get_event('minion', opts=self.opts,
                                                     listen=False)
                    evt.fire_event({'complete': True},
                                   tag=salt.defaults.events.MINION_PILLAR_COMPLETE)
            except SaltClientError:
                # Do not exit if a pillar refresh fails.
                log.error('Pillar data could not be refreshed. '
                          'One or more masters may be down!')
            finally:
                # Always release the async pillar channel.
                async_pillar.destroy()
        # Refresh the dependent subsystems even when not connected, so any
        # locally available changes are still picked up.
        self.module_refresh(force_refresh, notify)
        self.matchers_refresh()
        self.beacons_refresh()

    def manage_schedule(self, tag, data):
        '''
        Manage this minion's scheduled jobs (add, delete, modify, enable,
        disable, postpone, ...) in response to a manage_schedule event.
''' func = data.get('func', None) name = data.get('name', None) schedule = data.get('schedule', None) where = data.get('where', None) persist = data.get('persist', None) funcs = {'delete': ('delete_job', (name, persist)), 'add': ('add_job', (schedule, persist)), 'modify': ('modify_job', (name, schedule, persist)), 'enable': ('enable_schedule', ()), 'disable': ('disable_schedule', ()), 'enable_job': ('enable_job', (name, persist)), 'disable_job': ('disable_job', (name, persist)), 'postpone_job': ('postpone_job', (name, data)), 'skip_job': ('skip_job', (name, data)), 'reload': ('reload', (schedule,)), 'list': ('list', (where,)), 'save_schedule': ('save_schedule', ()), 'get_next_fire_time': ('get_next_fire_time', (name,))} # Call the appropriate schedule function try: alias, params = funcs.get(func) getattr(self.schedule, alias)(*params) except TypeError: log.error('Function "%s" is unavailable in salt.utils.scheduler', func) def manage_beacons(self, tag, data): ''' Manage Beacons ''' func = data.get('func', None) name = data.get('name', None) beacon_data = data.get('beacon_data', None) include_pillar = data.get('include_pillar', None) include_opts = data.get('include_opts', None) funcs = {'add': ('add_beacon', (name, beacon_data)), 'modify': ('modify_beacon', (name, beacon_data)), 'delete': ('delete_beacon', (name,)), 'enable': ('enable_beacons', ()), 'disable': ('disable_beacons', ()), 'enable_beacon': ('enable_beacon', (name,)), 'disable_beacon': ('disable_beacon', (name,)), 'list': ('list_beacons', (include_opts, include_pillar)), 'list_available': ('list_available_beacons', ()), 'validate_beacon': ('validate_beacon', (name, beacon_data)), 'reset': ('reset', ())} # Call the appropriate beacon function try: alias, params = funcs.get(func) getattr(self.beacons, alias)(*params) except AttributeError: log.error('Function "%s" is unavailable in salt.beacons', func) except TypeError as exc: log.info( 'Failed to handle %s with data(%s). 
Error: %s', tag, data, exc, exc_info_on_loglevel=logging.DEBUG ) def environ_setenv(self, tag, data): ''' Set the salt-minion main process environment according to the data contained in the minion event data ''' environ = data.get('environ', None) if environ is None: return False false_unsets = data.get('false_unsets', False) clear_all = data.get('clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all) def _pre_tune(self): ''' Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running ''' if self._running is None: self._running = True elif self._running is False: log.error( 'This %s was scheduled to stop. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return elif self._running is True: log.error( 'This %s is already running. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return try: log.info( '%s is starting as user \'%s\'', self.__class__.__name__, salt.utils.user.get_user() ) except Exception as err: # Only windows is allowed to fail here. See #3189. Log as debug in # that case. Else, error. 
            log.log(
                # Only Windows is expected to fail here (see #3189), so log
                # at DEBUG there and at ERROR everywhere else.
                salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR,
                'Failed to get the user who is starting %s',
                self.__class__.__name__,
                exc_info=err
            )

    def _mine_send(self, tag, data):
        '''
        Send mine data to the master over a fresh request channel.

        Returns the master's reply, or None if the request timed out.
        '''
        channel = salt.transport.client.ReqChannel.factory(self.opts)
        data['tok'] = self.tok
        try:
            ret = channel.send(data)
            return ret
        except SaltReqTimeoutError:
            log.warning('Unable to send mine data to master.')
            return None
        finally:
            # Always release the channel, even on timeout.
            channel.close()

    def _handle_tag_module_refresh(self, tag, data):
        '''
        Handle a module_refresh event
        '''
        self.module_refresh(
            force_refresh=data.get('force_refresh', False),
            notify=data.get('notify', False)
        )

    @tornado.gen.coroutine
    def _handle_tag_pillar_refresh(self, tag, data):
        '''
        Handle a pillar_refresh event
        '''
        yield self.pillar_refresh(
            force_refresh=data.get('force_refresh', False),
            notify=data.get('notify', False)
        )

    def _handle_tag_beacons_refresh(self, tag, data):
        '''
        Handle a beacons_refresh event
        '''
        self.beacons_refresh()

    def _handle_tag_matchers_refresh(self, tag, data):
        '''
        Handle a matchers_refresh event
        '''
        self.matchers_refresh()

    def _handle_tag_manage_schedule(self, tag, data):
        '''
        Handle a manage_schedule event
        '''
        self.manage_schedule(tag, data)

    def _handle_tag_manage_beacons(self, tag, data):
        '''
        Handle a manage_beacons event
        '''
        self.manage_beacons(tag, data)

    def _handle_tag_grains_refresh(self, tag, data):
        '''
        Handle a grains_refresh event

        Only triggers a pillar refresh when the cached grains actually
        changed, unless the event explicitly forces it.
        '''
        if (data.get('force_refresh', False) or
                self.grains_cache != self.opts['grains']):
            self.pillar_refresh(force_refresh=True)
            self.grains_cache = self.opts['grains']

    def _handle_tag_environ_setenv(self, tag, data):
        '''
        Handle a environ_setenv event
        '''
        self.environ_setenv(tag, data)

    def _handle_tag_minion_mine(self, tag, data):
        '''
        Handle a _minion_mine event
        '''
        self._mine_send(tag, data)

    def _handle_tag_fire_master(self, tag, data):
        '''
        Handle a fire_master event: forward a locally fired event up to the
        master (only when a master connection is available).
        '''
        if self.connected:
            log.debug('Forwarding master event tag=%s', data['tag'])
self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) def _handle_tag_master_disconnected_failback(self, tag, data): ''' Handle a master_disconnected_failback event ''' # if the master disconnect event is for a different master, raise an exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 
'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = 
True self.io_loop.stop() def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) def _handle_tag_salt_auth_creds(self, tag, data): ''' Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def handle_event(self, package): 
        '''
        Handle an event from the epull_sock (all local minion events)
        '''
        if not self.ready:
            # Core attributes not set up yet; ignore events until they are.
            raise tornado.gen.Return()
        tag, data = salt.utils.event.SaltEvent.unpack(package)
        log.debug(
            'Minion of \'%s\' is handling event tag \'%s\'',
            self.opts['master'], tag
        )
        # Map event tag prefixes to their handler methods.
        tag_functions = {
            'beacons_refresh': self._handle_tag_beacons_refresh,
            'environ_setenv': self._handle_tag_environ_setenv,
            'fire_master': self._handle_tag_fire_master,
            'grains_refresh': self._handle_tag_grains_refresh,
            'matchers_refresh': self._handle_tag_matchers_refresh,
            'manage_schedule': self._handle_tag_manage_schedule,
            'manage_beacons': self._handle_tag_manage_beacons,
            '_minion_mine': self._handle_tag_minion_mine,
            'module_refresh': self._handle_tag_module_refresh,
            'pillar_refresh': self._handle_tag_pillar_refresh,
            'salt/auth/creds': self._handle_tag_salt_auth_creds,
            '_salt_error': self._handle_tag_salt_error,
            '__schedule_return': self._handle_tag_schedule_return,
            master_event(type='disconnected'): self._handle_tag_master_disconnected_failback,
            master_event(type='failback'): self._handle_tag_master_disconnected_failback,
            master_event(type='connected'): self._handle_tag_master_connected,
        }

        # Run the appropriate function
        # NOTE(review): this is a prefix match, and every handler whose key
        # is a prefix of the tag is invoked -- confirm this is intended for
        # overlapping prefixes.
        for tag_function in tag_functions:
            if tag.startswith(tag_function):
                tag_functions[tag_function](tag, data)

    def _fallback_cleanups(self):
        '''
        Fallback cleanup routines, attempting to fix leaked processes,
        threads, etc.
        '''
        # Add an extra fallback in case a forked process leaks through
        multiprocessing.active_children()

        # Cleanup Windows threads
        if not salt.utils.platform.is_windows():
            return
        for thread in self.win_proc:
            if not thread.is_alive():
                thread.join()
                try:
                    self.win_proc.remove(thread)
                    del thread
                except (ValueError, NameError):
                    pass

    def _setup_core(self):
        '''
        Set up the core minion attributes.
        This is safe to call multiple times.
        '''
        if not self.ready:
            # First call. Initialize.
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
        if 'tgt_type' in load:
            # Look up the matcher module for the requested target type.
            match_func = self.matchers.get(
                '{0}_match.match'.format(load['tgt_type']), None)
            if match_func is None:
                # Unknown target type: refuse the publication.
                return False
            if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
                # These matchers accept a key/value delimiter.
                delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM)
                if not match_func(load['tgt'], delimiter=delimiter):
                    return False
            elif not match_func(load['tgt']):
                return False
        else:
            # No tgt_type supplied: fall back to glob matching.
            if not self.matchers['glob_match.match'](load['tgt']):
                return False

        return True

    def destroy(self):
        '''
        Tear down the minion
        '''
        if self._running is False:
            # Already destroyed; make repeated calls harmless.
            return
        self._running = False
        if hasattr(self, 'schedule'):
            del self.schedule
        if hasattr(self, 'pub_channel') and self.pub_channel is not None:
            # Detach the receive callback before closing the channel.
            self.pub_channel.on_recv(None)
            if hasattr(self.pub_channel, 'close'):
                self.pub_channel.close()
            del self.pub_channel
        if hasattr(self, 'periodic_callbacks'):
            for cb in six.itervalues(self.periodic_callbacks):
                cb.stop()

    def __del__(self):
        self.destroy()


class Syndic(Minion):
    '''
    Make a Syndic minion, this minion will use the minion keys on the
    master to authenticate with a higher level master.
    '''
    def __init__(self, opts, **kwargs):
        self._syndic_interface = opts.get('interface')
        self._syndic = True
        # force auth_safemode True because Syndic don't support autorestart
        opts['auth_safemode'] = True
        opts['loop_interval'] = 1
        super(Syndic, self).__init__(opts, **kwargs)
        self.mminion = salt.minion.MasterMinion(opts)
        # jids already forwarded upstream, to avoid duplicate returns
        self.jid_forward_cache = set()
        self.jids = {}
        self.raw_events = []
        self.pub_future = None

    def _handle_decoded_payload(self, data):
        '''
        Override this method if you wish to handle the decoded data
        differently.
        '''
        # TODO: even do this??
data['to'] = int(data.get('to', self.opts['timeout'])) - 1 # Only forward the command if it didn't originate from ourselves if data.get('master_id', 0) != self.opts.get('master_id', 1): self.syndic_cmd(data) def syndic_cmd(self, data): ''' Take the now clear load and forward it on to the client cmd ''' # Set up default tgt_type if 'tgt_type' not in data: data['tgt_type'] = 'glob' kwargs = {} # optionally add a few fields to the publish data for field in ('master_id', # which master the job came from 'user', # which user ran the job ): if field in data: kwargs[field] = data[field] def timeout_handler(*args): log.warning('Unable to forward pub data: %s', args[1]) return True with tornado.stack_context.ExceptionStackContext(timeout_handler): self.local.pub_async(data['tgt'], data['fun'], data['arg'], data['tgt_type'], data['ret'], data['jid'], data['to'], io_loop=self.io_loop, callback=lambda _: None, **kwargs) def fire_master_syndic_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to false in Sodium release. 
self._fire_master( 'Syndic {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'syndic_start', sync=False, ) self._fire_master( 'Syndic {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'syndic'), sync=False, ) # TODO: clean up docs def tune_in_no_block(self): ''' Executes the tune_in sequence but omits extra logging and the management of the event bus assuming that these are handled outside the tune_in sequence ''' # Instantiate the local client self.local = salt.client.get_local_client( self.opts['_minion_conf_file'], io_loop=self.io_loop) # add handler to subscriber self.pub_channel.on_recv(self._process_cmd_socket) def _process_cmd_socket(self, payload): if payload is not None and payload['enc'] == 'aes': log.trace('Handling payload') self._handle_decoded_payload(payload['load']) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the syndic currently has no need. @tornado.gen.coroutine def reconnect(self): if hasattr(self, 'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication master, self.pub_channel = yield self.eval_master(opts=self.opts) if self.connected: self.opts['master'] = master self.pub_channel.on_recv(self._process_cmd_socket) log.info('Minion is ready to receive requests!') raise tornado.gen.Return(self) def destroy(self): ''' Tear down the syndic minion ''' # We borrowed the local clients poller so give it back before # it's destroyed. Reset the local poller reference. 
super(Syndic, self).destroy() if hasattr(self, 'local'): del self.local if hasattr(self, 'forward_events'): self.forward_events.stop() # TODO: need a way of knowing if the syndic connection is busted class SyndicManager(MinionBase): ''' Make a MultiMaster syndic minion, this minion will handle relaying jobs and returns from all minions connected to it to the list of masters it is connected to. Modes (controlled by `syndic_mode`: sync: This mode will synchronize all events and publishes from higher level masters cluster: This mode will only sync job publishes and returns Note: jobs will be returned best-effort to the requesting master. This also means (since we are using zmq) that if a job was fired and the master disconnects between the publish and return, that the return will end up in a zmq buffer in this Syndic headed to that original master. In addition, since these classes all seem to use a mix of blocking and non-blocking calls (with varying timeouts along the way) this daemon does not handle failure well, it will (under most circumstances) stall the daemon for ~15s trying to forward events to the down master ''' # time to connect to upstream master SYNDIC_CONNECT_TIMEOUT = 5 SYNDIC_EVENT_TIMEOUT = 5 def __init__(self, opts, io_loop=None): opts['loop_interval'] = 1 super(SyndicManager, self).__init__(opts) self.mminion = salt.minion.MasterMinion(opts) # sync (old behavior), cluster (only returns and publishes) self.syndic_mode = self.opts.get('syndic_mode', 'sync') self.syndic_failover = self.opts.get('syndic_failover', 'random') self.auth_wait = self.opts['acceptance_wait_time'] self.max_auth_wait = self.opts['acceptance_wait_time_max'] self._has_master = threading.Event() self.jid_forward_cache = set() if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # List of events self.raw_events = [] # Dict of rets: {master_id: {event_tag: job_ret, ...}, ...} self.job_rets = {} # List of delayed job_rets which was 
unable to send for some reason and will be resend to # any available master self.delayed = [] # Active pub futures: {master_id: (future, [job_ret, ...]), ...} self.pub_futures = {} def _spawn_syndics(self): ''' Spawn all the coroutines which will sign in the syndics ''' self._syndics = OrderedDict() # mapping of opts['master'] -> syndic masters = self.opts['master'] if not isinstance(masters, list): masters = [masters] for master in masters: s_opts = copy.copy(self.opts) s_opts['master'] = master self._syndics[master] = self._connect_syndic(s_opts) @tornado.gen.coroutine def _connect_syndic(self, opts): ''' Create a syndic, and asynchronously connect it to a master ''' auth_wait = opts['acceptance_wait_time'] failed = False while True: if failed: if auth_wait < self.max_auth_wait: auth_wait += self.auth_wait log.debug( "sleeping before reconnect attempt to %s [%d/%d]", opts['master'], auth_wait, self.max_auth_wait, ) yield tornado.gen.sleep(auth_wait) # TODO: log? log.debug( 'Syndic attempting to connect to %s', opts['master'] ) try: syndic = Syndic(opts, timeout=self.SYNDIC_CONNECT_TIMEOUT, safe=False, io_loop=self.io_loop, ) yield syndic.connect_master(failed=failed) # set up the syndic to handle publishes (specifically not event forwarding) syndic.tune_in_no_block() # Send an event to the master that the minion is live syndic.fire_master_syndic_start() log.info( 'Syndic successfully connected to %s', opts['master'] ) break except SaltClientError as exc: failed = True log.error( 'Error while bringing up syndic for multi-syndic. Is the ' 'master at %s responding?', opts['master'] ) except (KeyboardInterrupt, SystemExit): raise except Exception: failed = True log.critical( 'Unexpected error while connecting to %s', opts['master'], exc_info=True ) raise tornado.gen.Return(syndic) def _mark_master_dead(self, master): ''' Mark a master as dead. 
This will start the sign-in routine ''' # if its connected, mark it dead if self._syndics[master].done(): syndic = self._syndics[master].result() # pylint: disable=no-member self._syndics[master] = syndic.reconnect() else: # TODO: debug? log.info( 'Attempting to mark %s as dead, although it is already ' 'marked dead', master ) def _call_syndic(self, func, args=(), kwargs=None, master_id=None): ''' Wrapper to call a given func on a syndic, best effort to get the one you asked for ''' if kwargs is None: kwargs = {} successful = False # Call for each master for master, syndic_future in self.iter_master_options(master_id): if not syndic_future.done() or syndic_future.exception(): log.error( 'Unable to call %s on %s, that syndic is not connected', func, master ) continue try: getattr(syndic_future.result(), func)(*args, **kwargs) successful = True except SaltClientError: log.error( 'Unable to call %s on %s, trying another...', func, master ) self._mark_master_dead(master) if not successful: log.critical('Unable to call %s on any masters!', func) def _return_pub_syndic(self, values, master_id=None): ''' Wrapper to call the '_return_pub_multi' a syndic, best effort to get the one you asked for ''' func = '_return_pub_multi' for master, syndic_future in self.iter_master_options(master_id): if not syndic_future.done() or syndic_future.exception(): log.error( 'Unable to call %s on %s, that syndic is not connected', func, master ) continue future, data = self.pub_futures.get(master, (None, None)) if future is not None: if not future.done(): if master == master_id: # Targeted master previous send not done yet, call again later return False else: # Fallback master is busy, try the next one continue elif future.exception(): # Previous execution on this master returned an error log.error( 'Unable to call %s on %s, trying another...', func, master ) self._mark_master_dead(master) del self.pub_futures[master] # Add not sent data to the delayed list and try the next master 
self.delayed.extend(data) continue future = getattr(syndic_future.result(), func)(values, '_syndic_return', timeout=self._return_retry_timer(), sync=False) self.pub_futures[master] = (future, values) return True # Loop done and didn't exit: wasn't sent, try again later return False def iter_master_options(self, master_id=None): ''' Iterate (in order) over your options for master ''' masters = list(self._syndics.keys()) if self.opts['syndic_failover'] == 'random': shuffle(masters) if master_id not in self._syndics: master_id = masters.pop(0) else: masters.remove(master_id) while True: yield master_id, self._syndics[master_id] if not masters: break master_id = masters.pop(0) def _reset_event_aggregation(self): self.job_rets = {} self.raw_events = [] def reconnect_event_bus(self, something): future = self.local.event.set_event_handler(self._process_event) self.io_loop.add_future(future, self.reconnect_event_bus) # Syndic Tune In def tune_in(self): ''' Lock onto the publisher. This is the main event loop for the syndic ''' self._spawn_syndics() # Instantiate the local client self.local = salt.client.get_local_client( self.opts['_minion_conf_file'], io_loop=self.io_loop) self.local.event.subscribe('') log.debug('SyndicManager \'%s\' trying to tune in', self.opts['id']) # register the event sub to the poller self.job_rets = {} self.raw_events = [] self._reset_event_aggregation() future = self.local.event.set_event_handler(self._process_event) self.io_loop.add_future(future, self.reconnect_event_bus) # forward events every syndic_event_forward_timeout self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events, self.opts['syndic_event_forward_timeout'] * 1000, ) self.forward_events.start() # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() self.io_loop.start() def _process_event(self, raw): # TODO: cleanup: Move down into event class mtag, data = self.local.event.unpack(raw, self.local.event.serial) log.trace('Got event %s', mtag) # pylint: 
disable=no-member tag_parts = mtag.split('/') if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \ salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \ 'return' in data: if 'jid' not in data: # Not a job return return if self.syndic_mode == 'cluster' and data.get('master_id', 0) == self.opts.get('master_id', 1): log.debug('Return received with matching master_id, not forwarding') return master = data.get('master_id') jdict = self.job_rets.setdefault(master, {}).setdefault(mtag, {}) if not jdict: jdict['__fun__'] = data.get('fun') jdict['__jid__'] = data['jid'] jdict['__load__'] = {} fstr = '{0}.get_load'.format(self.opts['master_job_cache']) # Only need to forward each load once. Don't hit the disk # for every minion return! if data['jid'] not in self.jid_forward_cache: jdict['__load__'].update( self.mminion.returners[fstr](data['jid']) ) self.jid_forward_cache.add(data['jid']) if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']: # Pop the oldest jid from the cache tmp = sorted(list(self.jid_forward_cache)) tmp.pop(0) self.jid_forward_cache = set(tmp) if master is not None: # __'s to make sure it doesn't print out on the master cli jdict['__master_id__'] = master ret = {} for key in 'return', 'retcode', 'success': if key in data: ret[key] = data[key] jdict[data['id']] = ret else: # TODO: config to forward these? 
If so we'll have to keep track of who # has seen them # if we are the top level masters-- don't forward all the minion events if self.syndic_mode == 'sync': # Add generic event aggregation here if 'retcode' not in data: self.raw_events.append({'data': data, 'tag': mtag}) def _forward_events(self): log.trace('Forwarding events') # pylint: disable=no-member if self.raw_events: events = self.raw_events self.raw_events = [] self._call_syndic('_fire_master', kwargs={'events': events, 'pretag': tagify(self.opts['id'], base='syndic'), 'timeout': self._return_retry_timer(), 'sync': False, }, ) if self.delayed: res = self._return_pub_syndic(self.delayed) if res: self.delayed = [] for master in list(six.iterkeys(self.job_rets)): values = list(six.itervalues(self.job_rets[master])) res = self._return_pub_syndic(values, master_id=master) if res: del self.job_rets[master] class ProxyMinionManager(MinionManager): ''' Create the multi-minion interface but for proxy minions ''' def _create_minion_object(self, opts, timeout, safe, io_loop=None, loaded_base_name=None, jid_queue=None): ''' Helper function to return the correct type of object ''' return ProxyMinion(opts, timeout, safe, io_loop=io_loop, loaded_base_name=loaded_base_name, jid_queue=jid_queue) def _metaproxy_call(opts, fn_name): metaproxy = salt.loader.metaproxy(opts) try: metaproxy_name = opts['metaproxy'] except KeyError: metaproxy_name = 'proxy' log.trace( 'No metaproxy key found in opts for id %s. ' 'Defaulting to standard proxy minion.', opts['id'] ) metaproxy_fn = metaproxy_name + '.' + fn_name return metaproxy[metaproxy_fn] class ProxyMinion(Minion): ''' This class instantiates a 'proxy' minion--a minion that does not manipulate the host it runs on, but instead manipulates a device that cannot run a minion. ''' # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. 
(since they need to know which master they connected to) If this function is changed, please check Minion._post_master_init to see if those changes need to be propagated. ProxyMinions need a significantly different post master setup, which is why the differences are not factored out into separate helper functions. ''' mp_call = _metaproxy_call(self.opts, 'post_master_init') return mp_call(self, master) def _target_load(self, load): ''' Verify that the publication is valid and applies to this minion ''' mp_call = _metaproxy_call(self.opts, 'target_load') return mp_call(self, load) def _handle_payload(self, payload): mp_call = _metaproxy_call(self.opts, 'handle_payload') return mp_call(self, payload) @tornado.gen.coroutine def _handle_decoded_payload(self, data): mp_call = _metaproxy_call(self.opts, 'handle_decoded_payload') return mp_call(self, data) @classmethod def _target(cls, minion_instance, opts, data, connected): mp_call = _metaproxy_call(opts, 'target') return mp_call(cls, minion_instance, opts, data, connected) @classmethod def _thread_return(cls, minion_instance, opts, data): mp_call = _metaproxy_call(opts, 'thread_return') return mp_call(cls, minion_instance, opts, data) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): mp_call = _metaproxy_call(opts, 'thread_multi_return') return mp_call(cls, minion_instance, opts, data) class SProxyMinion(SMinion): ''' Create an object that has loaded all of the minion module functions, grains, modules, returners etc. The SProxyMinion allows developers to generate all of the salt minion functions and present them with these functions for general use. ''' def gen_modules(self, initial_load=False): ''' Tell the minion to reload the execution modules CLI Example: .. 
code-block:: bash salt '*' sys.reload_modules ''' self.opts['grains'] = salt.loader.grains(self.opts) self.opts['pillar'] = salt.pillar.get_pillar( self.opts, self.opts['grains'], self.opts['id'], saltenv=self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ).compile_pillar() if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts: errmsg = ( 'No "proxy" configuration key found in pillar or opts ' 'dictionaries for id {id}. Check your pillar/options ' 'configuration and contents. Salt-proxy aborted.' ).format(id=self.opts['id']) log.error(errmsg) self._running = False raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg) if 'proxy' not in self.opts: self.opts['proxy'] = self.opts['pillar']['proxy'] # Then load the proxy module self.proxy = salt.loader.proxy(self.opts) self.utils = salt.loader.utils(self.opts, proxy=self.proxy) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=False, proxy=self.proxy) self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy) self.matchers = salt.loader.matchers(self.opts) self.functions['sys.reload_modules'] = self.gen_modules self.executors = salt.loader.executors(self.opts, self.functions, proxy=self.proxy) fq_proxyname = self.opts['proxy']['proxytype'] # we can then sync any proxymodules down from the master # we do a sync_all here in case proxy code was installed by # SPM or was manually placed in /srv/salt/_modules etc. 
self.functions['saltutil.sync_all'](saltenv=self.opts['saltenv']) self.functions.pack['__proxy__'] = self.proxy self.proxy.pack['__salt__'] = self.functions self.proxy.pack['__ret__'] = self.returners self.proxy.pack['__pillar__'] = self.opts['pillar'] # Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__ self.utils = salt.loader.utils(self.opts, proxy=self.proxy) self.proxy.pack['__utils__'] = self.utils # Reload all modules so all dunder variables are injected self.proxy.reload_modules() if ('{0}.init'.format(fq_proxyname) not in self.proxy or '{0}.shutdown'.format(fq_proxyname) not in self.proxy): errmsg = 'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname) + \ 'Check your proxymodule. Salt-proxy aborted.' log.error(errmsg) self._running = False raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg) self.module_executors = self.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])() proxy_init_fn = self.proxy[fq_proxyname + '.init'] proxy_init_fn(self.opts) self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy) # Sync the grains here so the proxy can communicate them to the master self.functions['saltutil.sync_grains'](saltenv='base') self.grains_cache = self.opts['grains'] self.ready = True
saltstack/salt
salt/minion.py
MinionBase.process_beacons
python
def process_beacons(self, functions):
    '''
    Re-evaluate every configured beacon.

    The beacon configuration is merged fresh on each call (via the
    ``config.merge`` execution function) so that changes made through
    pillar or grains are picked up without restarting the minion.

    :param functions: loader dict of execution-module functions; only
        ``config.merge`` is consulted here.
    :return: the list produced by the beacon processor, or an empty
        list when ``config.merge`` is unavailable or yields no config.
    '''
    # Guard clause: without config.merge we cannot refresh the config.
    if 'config.merge' not in functions:
        return []
    merged_conf = functions['config.merge'](
        'beacons', self.opts['beacons'], omit_opts=True)
    if not merged_conf:
        return []
    return self.beacons.process(merged_conf, self.opts['grains'])  # pylint: disable=no-member
Evaluate all of the configured beacons, re-fetching the beacon configuration in case the pillar or grains have changed
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L448-L457
null
class MinionBase(object): def __init__(self, opts): self.opts = opts @staticmethod def process_schedule(minion, loop_interval): try: if hasattr(minion, 'schedule'): minion.schedule.eval() else: log.error('Minion scheduler not initialized. Scheduled jobs will not be run.') return # Check if scheduler requires lower loop interval than # the loop_interval setting if minion.schedule.loop_interval < loop_interval: loop_interval = minion.schedule.loop_interval log.debug( 'Overriding loop_interval because of scheduled jobs.' ) except Exception as exc: log.error('Exception %s occurred in scheduled job', exc) return loop_interval @tornado.gen.coroutine def eval_master(self, opts, timeout=60, safe=True, failed=False, failback=False): ''' Evaluates and returns a tuple of the current master address and the pub_channel. In standard mode, just creates a pub_channel with the given master address. With master_type=func evaluates the current master address from the given module and then creates a pub_channel. With master_type=failover takes the list of masters and loops through them. The first one that allows the minion to create a pub_channel is then returned. If this function is called outside the minions initialization phase (for example from the minions main event-loop when a master connection loss was detected), 'failed' should be set to True. The current (possibly failed) master will then be removed from the list of masters. ''' # return early if we are not connecting to a master if opts['master_type'] == 'disable': log.warning('Master is set to disable, skipping connection') self.connected = False raise tornado.gen.Return((None, None)) # Run masters discovery over SSDP. This may modify the whole configuration, # depending of the networking and sets of masters. # if we are using multimaster, discovery can only happen at start time # because MinionManager handles it. 
by eval_master time the minion doesn't # know about other siblings currently running if isinstance(self.opts['discovery'], dict) and not self.opts['discovery'].get('multimaster'): self._discover_masters() # check if master_type was altered from its default if opts['master_type'] != 'str' and opts['__role'] != 'syndic': # check for a valid keyword if opts['master_type'] == 'func': eval_master_func(opts) # if failover or distributed is set, master has to be of type list elif opts['master_type'] in ('failover', 'distributed'): if isinstance(opts['master'], list): log.info( 'Got list of available master addresses: %s', opts['master'] ) if opts['master_type'] == 'distributed': master_len = len(opts['master']) if master_len > 1: secondary_masters = opts['master'][1:] master_idx = crc32(opts['id']) % master_len try: preferred_masters = opts['master'] preferred_masters[0] = opts['master'][master_idx] preferred_masters[1:] = [m for m in opts['master'] if m != preferred_masters[0]] opts['master'] = preferred_masters log.info('Distributed to the master at \'%s\'.', opts['master'][0]) except (KeyError, AttributeError, TypeError): log.warning('Failed to distribute to a specific master.') else: log.warning('master_type = distributed needs more than 1 master.') if opts['master_shuffle']: log.warning( 'Use of \'master_shuffle\' detected. \'master_shuffle\' is deprecated in favor ' 'of \'random_master\'. Please update your minion config file.' ) opts['random_master'] = opts['master_shuffle'] opts['auth_tries'] = 0 if opts['master_failback'] and opts['master_failback_interval'] == 0: opts['master_failback_interval'] = opts['master_alive_interval'] # if opts['master'] is a str and we have never created opts['master_list'] elif isinstance(opts['master'], six.string_types) and ('master_list' not in opts): # We have a string, but a list was what was intended. Convert. 
# See issue 23611 for details opts['master'] = [opts['master']] elif opts['__role'] == 'syndic': log.info('Syndic setting master_syndic to \'%s\'', opts['master']) # if failed=True, the minion was previously connected # we're probably called from the minions main-event-loop # because a master connection loss was detected. remove # the possibly failed master from the list of masters. elif failed: if failback: # failback list of masters to original config opts['master'] = opts['master_list'] else: log.info( 'Moving possibly failed master %s to the end of ' 'the list of masters', opts['master'] ) if opts['master'] in opts['local_masters']: # create new list of master with the possibly failed # one moved to the end failed_master = opts['master'] opts['master'] = [x for x in opts['local_masters'] if opts['master'] != x] opts['master'].append(failed_master) else: opts['master'] = opts['master_list'] else: msg = ('master_type set to \'failover\' but \'master\' ' 'is not of type list but of type ' '{0}'.format(type(opts['master']))) log.error(msg) sys.exit(salt.defaults.exitcodes.EX_GENERIC) # If failover is set, minion have to failover on DNS errors instead of retry DNS resolve. # See issue 21082 for details if opts['retry_dns'] and opts['master_type'] == 'failover': msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. 
' 'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.') log.critical(msg) opts['retry_dns'] = 0 else: msg = ('Invalid keyword \'{0}\' for variable ' '\'master_type\''.format(opts['master_type'])) log.error(msg) sys.exit(salt.defaults.exitcodes.EX_GENERIC) # FIXME: if SMinion don't define io_loop, it can't switch master see #29088 # Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop # (The channel factories will set a default if the kwarg isn't passed) factory_kwargs = {'timeout': timeout, 'safe': safe} if getattr(self, 'io_loop', None): factory_kwargs['io_loop'] = self.io_loop # pylint: disable=no-member tries = opts.get('master_tries', 1) attempts = 0 # if we have a list of masters, loop through them and be # happy with the first one that allows us to connect if isinstance(opts['master'], list): conn = False last_exc = None opts['master_uri_list'] = [] opts['local_masters'] = copy.copy(opts['master']) # shuffle the masters and then loop through them if opts['random_master']: # master_failback is only used when master_type is set to failover if opts['master_type'] == 'failover' and opts['master_failback']: secondary_masters = opts['local_masters'][1:] shuffle(secondary_masters) opts['local_masters'][1:] = secondary_masters else: shuffle(opts['local_masters']) # This sits outside of the connection loop below because it needs to set # up a list of master URIs regardless of which masters are available # to connect _to_. This is primarily used for masterless mode, when # we need a list of master URIs to fire calls back to. for master in opts['local_masters']: opts['master'] = master opts.update(prep_ip_port(opts)) opts['master_uri_list'].append(resolve_dns(opts)['master_uri']) while True: if attempts != 0: # Give up a little time between connection attempts # to allow the IOLoop to run any other scheduled tasks. 
yield tornado.gen.sleep(opts['acceptance_wait_time']) attempts += 1 if tries > 0: log.debug( 'Connecting to master. Attempt %s of %s', attempts, tries ) else: log.debug( 'Connecting to master. Attempt %s (infinite attempts)', attempts ) for master in opts['local_masters']: opts['master'] = master opts.update(prep_ip_port(opts)) opts.update(resolve_dns(opts)) # on first run, update self.opts with the whole master list # to enable a minion to re-use old masters if they get fixed if 'master_list' not in opts: opts['master_list'] = copy.copy(opts['local_masters']) self.opts = opts pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs) try: yield pub_channel.connect() conn = True break except SaltClientError as exc: last_exc = exc if exc.strerror.startswith('Could not access'): msg = ( 'Failed to initiate connection with Master ' '%s: check ownership/permissions. Error ' 'message: %s', opts['master'], exc ) else: msg = ('Master %s could not be reached, trying next ' 'next master (if any)', opts['master']) log.info(msg) continue if not conn: if attempts == tries: # Exhausted all attempts. Return exception. self.connected = False self.opts['master'] = copy.copy(self.opts['local_masters']) log.error( 'No master could be reached or all masters ' 'denied the minion\'s connection attempt.' ) # If the code reaches this point, 'last_exc' # should already be set. raise last_exc # pylint: disable=E0702 else: self.tok = pub_channel.auth.gen_token(b'salt') self.connected = True raise tornado.gen.Return((opts['master'], pub_channel)) # single master sign in else: if opts['random_master']: log.warning('random_master is True but there is only one master specified. Ignoring.') while True: if attempts != 0: # Give up a little time between connection attempts # to allow the IOLoop to run any other scheduled tasks. yield tornado.gen.sleep(opts['acceptance_wait_time']) attempts += 1 if tries > 0: log.debug( 'Connecting to master. 
Attempt %s of %s', attempts, tries ) else: log.debug( 'Connecting to master. Attempt %s (infinite attempts)', attempts ) opts.update(prep_ip_port(opts)) opts.update(resolve_dns(opts)) try: if self.opts['transport'] == 'detect': self.opts['detect_mode'] = True for trans in ('zeromq', 'tcp'): if trans == 'zeromq' and not zmq: continue self.opts['transport'] = trans pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs) yield pub_channel.connect() if not pub_channel.auth.authenticated: continue del self.opts['detect_mode'] break else: pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs) yield pub_channel.connect() self.tok = pub_channel.auth.gen_token(b'salt') self.connected = True raise tornado.gen.Return((opts['master'], pub_channel)) except SaltClientError as exc: if attempts == tries: # Exhausted all attempts. Return exception. self.connected = False raise exc def _discover_masters(self): ''' Discover master(s) and decide where to connect, if SSDP is around. This modifies the configuration on the fly. :return: ''' if self.opts['master'] == DEFAULT_MINION_OPTS['master'] and self.opts['discovery'] is not False: master_discovery_client = salt.utils.ssdp.SSDPDiscoveryClient() masters = {} for att in range(self.opts['discovery'].get('attempts', 3)): try: att += 1 log.info('Attempting %s time(s) to discover masters', att) masters.update(master_discovery_client.discover()) if not masters: time.sleep(self.opts['discovery'].get('pause', 5)) else: break except Exception as err: log.error('SSDP discovery failure: %s', err) break if masters: policy = self.opts.get('discovery', {}).get('match', 'any') if policy not in ['any', 'all']: log.error('SSDP configuration matcher failure: unknown value "%s". 
' 'Should be "any" or "all"', policy) return mapping = self.opts['discovery'].get('mapping', {}) discovered = [] for addr, mappings in masters.items(): for proto_data in mappings: cnt = len([key for key, value in mapping.items() if proto_data.get('mapping', {}).get(key) == value]) if policy == 'any' and bool(cnt) or cnt == len(mapping): if self.opts['discovery'].get('multimaster'): discovered.append(proto_data['master']) else: self.opts['master'] = proto_data['master'] return self.opts['master'] = discovered def _return_retry_timer(self): ''' Based on the minion configuration, either return a randomized timer or just return the value of the return_retry_timer. ''' msg = 'Minion return retry timer set to %s seconds' if self.opts.get('return_retry_timer_max'): try: random_retry = randint(self.opts['return_retry_timer'], self.opts['return_retry_timer_max']) retry_msg = msg % random_retry log.debug('%s (randomized)', msg % random_retry) return random_retry except ValueError: # Catch wiseguys using negative integers here log.error( 'Invalid value (return_retry_timer: %s or ' 'return_retry_timer_max: %s). Both must be positive ' 'integers.', self.opts['return_retry_timer'], self.opts['return_retry_timer_max'], ) log.debug(msg, DEFAULT_MINION_OPTS['return_retry_timer']) return DEFAULT_MINION_OPTS['return_retry_timer'] else: log.debug(msg, self.opts.get('return_retry_timer')) return self.opts.get('return_retry_timer')
saltstack/salt
salt/minion.py
MinionBase.eval_master
python
def eval_master(self, opts, timeout=60, safe=True, failed=False, failback=False): ''' Evaluates and returns a tuple of the current master address and the pub_channel. In standard mode, just creates a pub_channel with the given master address. With master_type=func evaluates the current master address from the given module and then creates a pub_channel. With master_type=failover takes the list of masters and loops through them. The first one that allows the minion to create a pub_channel is then returned. If this function is called outside the minions initialization phase (for example from the minions main event-loop when a master connection loss was detected), 'failed' should be set to True. The current (possibly failed) master will then be removed from the list of masters. ''' # return early if we are not connecting to a master if opts['master_type'] == 'disable': log.warning('Master is set to disable, skipping connection') self.connected = False raise tornado.gen.Return((None, None)) # Run masters discovery over SSDP. This may modify the whole configuration, # depending of the networking and sets of masters. # if we are using multimaster, discovery can only happen at start time # because MinionManager handles it. 
by eval_master time the minion doesn't # know about other siblings currently running if isinstance(self.opts['discovery'], dict) and not self.opts['discovery'].get('multimaster'): self._discover_masters() # check if master_type was altered from its default if opts['master_type'] != 'str' and opts['__role'] != 'syndic': # check for a valid keyword if opts['master_type'] == 'func': eval_master_func(opts) # if failover or distributed is set, master has to be of type list elif opts['master_type'] in ('failover', 'distributed'): if isinstance(opts['master'], list): log.info( 'Got list of available master addresses: %s', opts['master'] ) if opts['master_type'] == 'distributed': master_len = len(opts['master']) if master_len > 1: secondary_masters = opts['master'][1:] master_idx = crc32(opts['id']) % master_len try: preferred_masters = opts['master'] preferred_masters[0] = opts['master'][master_idx] preferred_masters[1:] = [m for m in opts['master'] if m != preferred_masters[0]] opts['master'] = preferred_masters log.info('Distributed to the master at \'%s\'.', opts['master'][0]) except (KeyError, AttributeError, TypeError): log.warning('Failed to distribute to a specific master.') else: log.warning('master_type = distributed needs more than 1 master.') if opts['master_shuffle']: log.warning( 'Use of \'master_shuffle\' detected. \'master_shuffle\' is deprecated in favor ' 'of \'random_master\'. Please update your minion config file.' ) opts['random_master'] = opts['master_shuffle'] opts['auth_tries'] = 0 if opts['master_failback'] and opts['master_failback_interval'] == 0: opts['master_failback_interval'] = opts['master_alive_interval'] # if opts['master'] is a str and we have never created opts['master_list'] elif isinstance(opts['master'], six.string_types) and ('master_list' not in opts): # We have a string, but a list was what was intended. Convert. 
# See issue 23611 for details opts['master'] = [opts['master']] elif opts['__role'] == 'syndic': log.info('Syndic setting master_syndic to \'%s\'', opts['master']) # if failed=True, the minion was previously connected # we're probably called from the minions main-event-loop # because a master connection loss was detected. remove # the possibly failed master from the list of masters. elif failed: if failback: # failback list of masters to original config opts['master'] = opts['master_list'] else: log.info( 'Moving possibly failed master %s to the end of ' 'the list of masters', opts['master'] ) if opts['master'] in opts['local_masters']: # create new list of master with the possibly failed # one moved to the end failed_master = opts['master'] opts['master'] = [x for x in opts['local_masters'] if opts['master'] != x] opts['master'].append(failed_master) else: opts['master'] = opts['master_list'] else: msg = ('master_type set to \'failover\' but \'master\' ' 'is not of type list but of type ' '{0}'.format(type(opts['master']))) log.error(msg) sys.exit(salt.defaults.exitcodes.EX_GENERIC) # If failover is set, minion have to failover on DNS errors instead of retry DNS resolve. # See issue 21082 for details if opts['retry_dns'] and opts['master_type'] == 'failover': msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. 
' 'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.') log.critical(msg) opts['retry_dns'] = 0 else: msg = ('Invalid keyword \'{0}\' for variable ' '\'master_type\''.format(opts['master_type'])) log.error(msg) sys.exit(salt.defaults.exitcodes.EX_GENERIC) # FIXME: if SMinion don't define io_loop, it can't switch master see #29088 # Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop # (The channel factories will set a default if the kwarg isn't passed) factory_kwargs = {'timeout': timeout, 'safe': safe} if getattr(self, 'io_loop', None): factory_kwargs['io_loop'] = self.io_loop # pylint: disable=no-member tries = opts.get('master_tries', 1) attempts = 0 # if we have a list of masters, loop through them and be # happy with the first one that allows us to connect if isinstance(opts['master'], list): conn = False last_exc = None opts['master_uri_list'] = [] opts['local_masters'] = copy.copy(opts['master']) # shuffle the masters and then loop through them if opts['random_master']: # master_failback is only used when master_type is set to failover if opts['master_type'] == 'failover' and opts['master_failback']: secondary_masters = opts['local_masters'][1:] shuffle(secondary_masters) opts['local_masters'][1:] = secondary_masters else: shuffle(opts['local_masters']) # This sits outside of the connection loop below because it needs to set # up a list of master URIs regardless of which masters are available # to connect _to_. This is primarily used for masterless mode, when # we need a list of master URIs to fire calls back to. for master in opts['local_masters']: opts['master'] = master opts.update(prep_ip_port(opts)) opts['master_uri_list'].append(resolve_dns(opts)['master_uri']) while True: if attempts != 0: # Give up a little time between connection attempts # to allow the IOLoop to run any other scheduled tasks. 
yield tornado.gen.sleep(opts['acceptance_wait_time']) attempts += 1 if tries > 0: log.debug( 'Connecting to master. Attempt %s of %s', attempts, tries ) else: log.debug( 'Connecting to master. Attempt %s (infinite attempts)', attempts ) for master in opts['local_masters']: opts['master'] = master opts.update(prep_ip_port(opts)) opts.update(resolve_dns(opts)) # on first run, update self.opts with the whole master list # to enable a minion to re-use old masters if they get fixed if 'master_list' not in opts: opts['master_list'] = copy.copy(opts['local_masters']) self.opts = opts pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs) try: yield pub_channel.connect() conn = True break except SaltClientError as exc: last_exc = exc if exc.strerror.startswith('Could not access'): msg = ( 'Failed to initiate connection with Master ' '%s: check ownership/permissions. Error ' 'message: %s', opts['master'], exc ) else: msg = ('Master %s could not be reached, trying next ' 'next master (if any)', opts['master']) log.info(msg) continue if not conn: if attempts == tries: # Exhausted all attempts. Return exception. self.connected = False self.opts['master'] = copy.copy(self.opts['local_masters']) log.error( 'No master could be reached or all masters ' 'denied the minion\'s connection attempt.' ) # If the code reaches this point, 'last_exc' # should already be set. raise last_exc # pylint: disable=E0702 else: self.tok = pub_channel.auth.gen_token(b'salt') self.connected = True raise tornado.gen.Return((opts['master'], pub_channel)) # single master sign in else: if opts['random_master']: log.warning('random_master is True but there is only one master specified. Ignoring.') while True: if attempts != 0: # Give up a little time between connection attempts # to allow the IOLoop to run any other scheduled tasks. yield tornado.gen.sleep(opts['acceptance_wait_time']) attempts += 1 if tries > 0: log.debug( 'Connecting to master. 
Attempt %s of %s', attempts, tries ) else: log.debug( 'Connecting to master. Attempt %s (infinite attempts)', attempts ) opts.update(prep_ip_port(opts)) opts.update(resolve_dns(opts)) try: if self.opts['transport'] == 'detect': self.opts['detect_mode'] = True for trans in ('zeromq', 'tcp'): if trans == 'zeromq' and not zmq: continue self.opts['transport'] = trans pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs) yield pub_channel.connect() if not pub_channel.auth.authenticated: continue del self.opts['detect_mode'] break else: pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs) yield pub_channel.connect() self.tok = pub_channel.auth.gen_token(b'salt') self.connected = True raise tornado.gen.Return((opts['master'], pub_channel)) except SaltClientError as exc: if attempts == tries: # Exhausted all attempts. Return exception. self.connected = False raise exc
Evaluates and returns a tuple of the current master address and the pub_channel. In standard mode, just creates a pub_channel with the given master address. With master_type=func evaluates the current master address from the given module and then creates a pub_channel. With master_type=failover takes the list of masters and loops through them. The first one that allows the minion to create a pub_channel is then returned. If this function is called outside the minions initialization phase (for example from the minions main event-loop when a master connection loss was detected), 'failed' should be set to True. The current (possibly failed) master will then be removed from the list of masters.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L460-L729
[ "def eval_master_func(opts):\n '''\n Evaluate master function if master type is 'func'\n and save it result in opts['master']\n '''\n if '__master_func_evaluated' not in opts:\n # split module and function and try loading the module\n mod_fun = opts['master']\n mod, fun = mod_fun.split('.')\n try:\n master_mod = salt.loader.raw_mod(opts, mod, fun)\n if not master_mod:\n raise KeyError\n # we take whatever the module returns as master address\n opts['master'] = master_mod[mod_fun]()\n # Check for valid types\n if not isinstance(opts['master'], (six.string_types, list)):\n raise TypeError\n opts['__master_func_evaluated'] = True\n except KeyError:\n log.error('Failed to load module %s', mod_fun)\n sys.exit(salt.defaults.exitcodes.EX_GENERIC)\n except TypeError:\n log.error('%s returned from %s is not a string', opts['master'], mod_fun)\n sys.exit(salt.defaults.exitcodes.EX_GENERIC)\n log.info('Evaluated master from module: %s', mod_fun)\n", "def resolve_dns(opts, fallback=True):\n '''\n Resolves the master_ip and master_uri options\n '''\n ret = {}\n check_dns = True\n if (opts.get('file_client', 'remote') == 'local' and\n not opts.get('use_master_when_local', False)):\n check_dns = False\n # Since salt.log is imported below, salt.utils.network needs to be imported here as well\n import salt.utils.network\n\n if check_dns is True:\n try:\n if opts['master'] == '':\n raise SaltSystemExit\n ret['master_ip'] = salt.utils.network.dns_check(\n opts['master'],\n int(opts['master_port']),\n True,\n opts['ipv6'],\n attempt_connect=False)\n except SaltClientError:\n retry_dns_count = opts.get('retry_dns_count', None)\n if opts['retry_dns']:\n while True:\n if retry_dns_count is not None:\n if retry_dns_count == 0:\n raise SaltMasterUnresolvableError\n retry_dns_count -= 1\n import salt.log\n msg = ('Master hostname: \\'{0}\\' not found or not responsive. 
'\n 'Retrying in {1} seconds').format(opts['master'], opts['retry_dns'])\n if salt.log.setup.is_console_configured():\n log.error(msg)\n else:\n print('WARNING: {0}'.format(msg))\n time.sleep(opts['retry_dns'])\n try:\n ret['master_ip'] = salt.utils.network.dns_check(\n opts['master'],\n int(opts['master_port']),\n True,\n opts['ipv6'],\n attempt_connect=False)\n break\n except SaltClientError:\n pass\n else:\n if fallback:\n ret['master_ip'] = '127.0.0.1'\n else:\n raise\n except SaltSystemExit:\n unknown_str = 'unknown address'\n master = opts.get('master', unknown_str)\n if master == '':\n master = unknown_str\n if opts.get('__role') == 'syndic':\n err = 'Master address: \\'{0}\\' could not be resolved. Invalid or unresolveable address. ' \\\n 'Set \\'syndic_master\\' value in minion config.'.format(master)\n else:\n err = 'Master address: \\'{0}\\' could not be resolved. Invalid or unresolveable address. ' \\\n 'Set \\'master\\' value in minion config.'.format(master)\n log.error(err)\n raise SaltSystemExit(code=42, msg=err)\n else:\n ret['master_ip'] = '127.0.0.1'\n\n if 'master_ip' in ret and 'master_ip' in opts:\n if ret['master_ip'] != opts['master_ip']:\n log.warning(\n 'Master ip address changed from %s to %s',\n opts['master_ip'], ret['master_ip']\n )\n if opts['source_interface_name']:\n log.trace('Custom source interface required: %s', opts['source_interface_name'])\n interfaces = salt.utils.network.interfaces()\n log.trace('The following interfaces are available on this Minion:')\n log.trace(interfaces)\n if opts['source_interface_name'] in interfaces:\n if interfaces[opts['source_interface_name']]['up']:\n addrs = interfaces[opts['source_interface_name']]['inet'] if not opts['ipv6'] else\\\n interfaces[opts['source_interface_name']]['inet6']\n ret['source_ip'] = addrs[0]['address']\n log.debug('Using %s as source IP address', ret['source_ip'])\n else:\n log.warning('The interface %s is down so it cannot be used as source to connect to the Master',\n 
opts['source_interface_name'])\n else:\n log.warning('%s is not a valid interface. Ignoring.', opts['source_interface_name'])\n elif opts['source_address']:\n ret['source_ip'] = salt.utils.network.dns_check(\n opts['source_address'],\n int(opts['source_ret_port']),\n True,\n opts['ipv6'],\n attempt_connect=False)\n log.debug('Using %s as source IP address', ret['source_ip'])\n if opts['source_ret_port']:\n ret['source_ret_port'] = int(opts['source_ret_port'])\n log.debug('Using %d as source port for the ret server', ret['source_ret_port'])\n if opts['source_publish_port']:\n ret['source_publish_port'] = int(opts['source_publish_port'])\n log.debug('Using %d as source port for the master pub', ret['source_publish_port'])\n ret['master_uri'] = 'tcp://{ip}:{port}'.format(\n ip=ret['master_ip'], port=opts['master_port'])\n log.debug('Master URI: %s', ret['master_uri'])\n\n return ret\n", "def prep_ip_port(opts):\n '''\n parse host:port values from opts['master'] and return valid:\n master: ip address or hostname as a string\n master_port: (optional) master returner port as integer\n\n e.g.:\n - master: 'localhost:1234' -> {'master': 'localhost', 'master_port': 1234}\n - master: '127.0.0.1:1234' -> {'master': '127.0.0.1', 'master_port' :1234}\n - master: '[::1]:1234' -> {'master': '::1', 'master_port': 1234}\n - master: 'fe80::a00:27ff:fedc:ba98' -> {'master': 'fe80::a00:27ff:fedc:ba98'}\n '''\n ret = {}\n # Use given master IP if \"ip_only\" is set or if master_ip is an ipv6 address without\n # a port specified. 
The is_ipv6 check returns False if brackets are used in the IP\n # definition such as master: '[::1]:1234'.\n if opts['master_uri_format'] == 'ip_only':\n ret['master'] = ipaddress.ip_address(opts['master'])\n else:\n host, port = parse_host_port(opts['master'])\n ret = {'master': host}\n if port:\n ret.update({'master_port': port})\n\n return ret\n", "def factory(cls, opts, **kwargs):\n # Default to ZeroMQ for now\n ttype = 'zeromq'\n\n # determine the ttype\n if 'transport' in opts:\n ttype = opts['transport']\n elif 'transport' in opts.get('pillar', {}).get('master', {}):\n ttype = opts['pillar']['master']['transport']\n\n # switch on available ttypes\n if ttype == 'detect':\n opts['detect_mode'] = True\n log.info('Transport is set to detect; using %s', ttype)\n if ttype == 'zeromq':\n import salt.transport.zeromq\n return salt.transport.zeromq.AsyncZeroMQPubChannel(opts, **kwargs)\n elif ttype == 'tcp':\n if not cls._resolver_configured:\n # TODO: add opt to specify number of resolver threads\n AsyncChannel._config_resolver()\n import salt.transport.tcp\n return salt.transport.tcp.AsyncTCPPubChannel(opts, **kwargs)\n elif ttype == 'local': # TODO:\n import salt.transport.local\n return salt.transport.local.AsyncLocalPubChannel(opts, **kwargs)\n else:\n raise Exception(\n 'Channels are only defined for tcp, zeromq, and local'\n )\n" ]
class MinionBase(object): def __init__(self, opts): self.opts = opts @staticmethod def process_schedule(minion, loop_interval): try: if hasattr(minion, 'schedule'): minion.schedule.eval() else: log.error('Minion scheduler not initialized. Scheduled jobs will not be run.') return # Check if scheduler requires lower loop interval than # the loop_interval setting if minion.schedule.loop_interval < loop_interval: loop_interval = minion.schedule.loop_interval log.debug( 'Overriding loop_interval because of scheduled jobs.' ) except Exception as exc: log.error('Exception %s occurred in scheduled job', exc) return loop_interval def process_beacons(self, functions): ''' Evaluate all of the configured beacons, grab the config again in case the pillar or grains changed ''' if 'config.merge' in functions: b_conf = functions['config.merge']('beacons', self.opts['beacons'], omit_opts=True) if b_conf: return self.beacons.process(b_conf, self.opts['grains']) # pylint: disable=no-member return [] @tornado.gen.coroutine def _discover_masters(self): ''' Discover master(s) and decide where to connect, if SSDP is around. This modifies the configuration on the fly. :return: ''' if self.opts['master'] == DEFAULT_MINION_OPTS['master'] and self.opts['discovery'] is not False: master_discovery_client = salt.utils.ssdp.SSDPDiscoveryClient() masters = {} for att in range(self.opts['discovery'].get('attempts', 3)): try: att += 1 log.info('Attempting %s time(s) to discover masters', att) masters.update(master_discovery_client.discover()) if not masters: time.sleep(self.opts['discovery'].get('pause', 5)) else: break except Exception as err: log.error('SSDP discovery failure: %s', err) break if masters: policy = self.opts.get('discovery', {}).get('match', 'any') if policy not in ['any', 'all']: log.error('SSDP configuration matcher failure: unknown value "%s". 
' 'Should be "any" or "all"', policy) return mapping = self.opts['discovery'].get('mapping', {}) discovered = [] for addr, mappings in masters.items(): for proto_data in mappings: cnt = len([key for key, value in mapping.items() if proto_data.get('mapping', {}).get(key) == value]) if policy == 'any' and bool(cnt) or cnt == len(mapping): if self.opts['discovery'].get('multimaster'): discovered.append(proto_data['master']) else: self.opts['master'] = proto_data['master'] return self.opts['master'] = discovered def _return_retry_timer(self): ''' Based on the minion configuration, either return a randomized timer or just return the value of the return_retry_timer. ''' msg = 'Minion return retry timer set to %s seconds' if self.opts.get('return_retry_timer_max'): try: random_retry = randint(self.opts['return_retry_timer'], self.opts['return_retry_timer_max']) retry_msg = msg % random_retry log.debug('%s (randomized)', msg % random_retry) return random_retry except ValueError: # Catch wiseguys using negative integers here log.error( 'Invalid value (return_retry_timer: %s or ' 'return_retry_timer_max: %s). Both must be positive ' 'integers.', self.opts['return_retry_timer'], self.opts['return_retry_timer_max'], ) log.debug(msg, DEFAULT_MINION_OPTS['return_retry_timer']) return DEFAULT_MINION_OPTS['return_retry_timer'] else: log.debug(msg, self.opts.get('return_retry_timer')) return self.opts.get('return_retry_timer')
saltstack/salt
salt/minion.py
MinionBase._discover_masters
python
def _discover_masters(self): ''' Discover master(s) and decide where to connect, if SSDP is around. This modifies the configuration on the fly. :return: ''' if self.opts['master'] == DEFAULT_MINION_OPTS['master'] and self.opts['discovery'] is not False: master_discovery_client = salt.utils.ssdp.SSDPDiscoveryClient() masters = {} for att in range(self.opts['discovery'].get('attempts', 3)): try: att += 1 log.info('Attempting %s time(s) to discover masters', att) masters.update(master_discovery_client.discover()) if not masters: time.sleep(self.opts['discovery'].get('pause', 5)) else: break except Exception as err: log.error('SSDP discovery failure: %s', err) break if masters: policy = self.opts.get('discovery', {}).get('match', 'any') if policy not in ['any', 'all']: log.error('SSDP configuration matcher failure: unknown value "%s". ' 'Should be "any" or "all"', policy) return mapping = self.opts['discovery'].get('mapping', {}) discovered = [] for addr, mappings in masters.items(): for proto_data in mappings: cnt = len([key for key, value in mapping.items() if proto_data.get('mapping', {}).get(key) == value]) if policy == 'any' and bool(cnt) or cnt == len(mapping): if self.opts['discovery'].get('multimaster'): discovered.append(proto_data['master']) else: self.opts['master'] = proto_data['master'] return self.opts['master'] = discovered
Discover master(s) and decide where to connect, if SSDP is around. This modifies the configuration on the fly. :return:
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L731-L771
[ "def discover(self):\n '''\n Gather the information of currently declared servers.\n\n :return:\n '''\n response = {}\n masters = {}\n self.log.info(\"Looking for a server discovery\")\n self._query()\n self._collect_masters_map(response)\n if not response:\n msg = 'No master has been discovered.'\n self.log.info(msg)\n else:\n for addr, descriptions in response.items():\n for data in descriptions: # Several masters can run at the same machine.\n msg = salt.utils.stringutils.to_unicode(data)\n if msg.startswith(self.signature):\n msg = msg.split(self.signature)[-1]\n self.log.debug(\n \"Service announcement at '%s:%s'. Response: '%s'\",\n addr[0], addr[1], msg\n )\n if ':E:' in msg:\n err = msg.split(':E:')[-1]\n self.log.error(\n 'Error response from the service publisher at %s: %s',\n addr, err\n )\n if \"timestamp\" in err:\n self.log.error('Publisher sent shifted timestamp from %s', addr)\n else:\n if addr not in masters:\n masters[addr] = []\n masters[addr].append(\n salt.utils.json.loads(msg.split(':@:')[-1], _json_module=_json)\n )\n return masters\n" ]
class MinionBase(object): def __init__(self, opts): self.opts = opts @staticmethod def process_schedule(minion, loop_interval): try: if hasattr(minion, 'schedule'): minion.schedule.eval() else: log.error('Minion scheduler not initialized. Scheduled jobs will not be run.') return # Check if scheduler requires lower loop interval than # the loop_interval setting if minion.schedule.loop_interval < loop_interval: loop_interval = minion.schedule.loop_interval log.debug( 'Overriding loop_interval because of scheduled jobs.' ) except Exception as exc: log.error('Exception %s occurred in scheduled job', exc) return loop_interval def process_beacons(self, functions): ''' Evaluate all of the configured beacons, grab the config again in case the pillar or grains changed ''' if 'config.merge' in functions: b_conf = functions['config.merge']('beacons', self.opts['beacons'], omit_opts=True) if b_conf: return self.beacons.process(b_conf, self.opts['grains']) # pylint: disable=no-member return [] @tornado.gen.coroutine def eval_master(self, opts, timeout=60, safe=True, failed=False, failback=False): ''' Evaluates and returns a tuple of the current master address and the pub_channel. In standard mode, just creates a pub_channel with the given master address. With master_type=func evaluates the current master address from the given module and then creates a pub_channel. With master_type=failover takes the list of masters and loops through them. The first one that allows the minion to create a pub_channel is then returned. If this function is called outside the minions initialization phase (for example from the minions main event-loop when a master connection loss was detected), 'failed' should be set to True. The current (possibly failed) master will then be removed from the list of masters. 
''' # return early if we are not connecting to a master if opts['master_type'] == 'disable': log.warning('Master is set to disable, skipping connection') self.connected = False raise tornado.gen.Return((None, None)) # Run masters discovery over SSDP. This may modify the whole configuration, # depending of the networking and sets of masters. # if we are using multimaster, discovery can only happen at start time # because MinionManager handles it. by eval_master time the minion doesn't # know about other siblings currently running if isinstance(self.opts['discovery'], dict) and not self.opts['discovery'].get('multimaster'): self._discover_masters() # check if master_type was altered from its default if opts['master_type'] != 'str' and opts['__role'] != 'syndic': # check for a valid keyword if opts['master_type'] == 'func': eval_master_func(opts) # if failover or distributed is set, master has to be of type list elif opts['master_type'] in ('failover', 'distributed'): if isinstance(opts['master'], list): log.info( 'Got list of available master addresses: %s', opts['master'] ) if opts['master_type'] == 'distributed': master_len = len(opts['master']) if master_len > 1: secondary_masters = opts['master'][1:] master_idx = crc32(opts['id']) % master_len try: preferred_masters = opts['master'] preferred_masters[0] = opts['master'][master_idx] preferred_masters[1:] = [m for m in opts['master'] if m != preferred_masters[0]] opts['master'] = preferred_masters log.info('Distributed to the master at \'%s\'.', opts['master'][0]) except (KeyError, AttributeError, TypeError): log.warning('Failed to distribute to a specific master.') else: log.warning('master_type = distributed needs more than 1 master.') if opts['master_shuffle']: log.warning( 'Use of \'master_shuffle\' detected. \'master_shuffle\' is deprecated in favor ' 'of \'random_master\'. Please update your minion config file.' 
) opts['random_master'] = opts['master_shuffle'] opts['auth_tries'] = 0 if opts['master_failback'] and opts['master_failback_interval'] == 0: opts['master_failback_interval'] = opts['master_alive_interval'] # if opts['master'] is a str and we have never created opts['master_list'] elif isinstance(opts['master'], six.string_types) and ('master_list' not in opts): # We have a string, but a list was what was intended. Convert. # See issue 23611 for details opts['master'] = [opts['master']] elif opts['__role'] == 'syndic': log.info('Syndic setting master_syndic to \'%s\'', opts['master']) # if failed=True, the minion was previously connected # we're probably called from the minions main-event-loop # because a master connection loss was detected. remove # the possibly failed master from the list of masters. elif failed: if failback: # failback list of masters to original config opts['master'] = opts['master_list'] else: log.info( 'Moving possibly failed master %s to the end of ' 'the list of masters', opts['master'] ) if opts['master'] in opts['local_masters']: # create new list of master with the possibly failed # one moved to the end failed_master = opts['master'] opts['master'] = [x for x in opts['local_masters'] if opts['master'] != x] opts['master'].append(failed_master) else: opts['master'] = opts['master_list'] else: msg = ('master_type set to \'failover\' but \'master\' ' 'is not of type list but of type ' '{0}'.format(type(opts['master']))) log.error(msg) sys.exit(salt.defaults.exitcodes.EX_GENERIC) # If failover is set, minion have to failover on DNS errors instead of retry DNS resolve. # See issue 21082 for details if opts['retry_dns'] and opts['master_type'] == 'failover': msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. 
' 'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.') log.critical(msg) opts['retry_dns'] = 0 else: msg = ('Invalid keyword \'{0}\' for variable ' '\'master_type\''.format(opts['master_type'])) log.error(msg) sys.exit(salt.defaults.exitcodes.EX_GENERIC) # FIXME: if SMinion don't define io_loop, it can't switch master see #29088 # Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop # (The channel factories will set a default if the kwarg isn't passed) factory_kwargs = {'timeout': timeout, 'safe': safe} if getattr(self, 'io_loop', None): factory_kwargs['io_loop'] = self.io_loop # pylint: disable=no-member tries = opts.get('master_tries', 1) attempts = 0 # if we have a list of masters, loop through them and be # happy with the first one that allows us to connect if isinstance(opts['master'], list): conn = False last_exc = None opts['master_uri_list'] = [] opts['local_masters'] = copy.copy(opts['master']) # shuffle the masters and then loop through them if opts['random_master']: # master_failback is only used when master_type is set to failover if opts['master_type'] == 'failover' and opts['master_failback']: secondary_masters = opts['local_masters'][1:] shuffle(secondary_masters) opts['local_masters'][1:] = secondary_masters else: shuffle(opts['local_masters']) # This sits outside of the connection loop below because it needs to set # up a list of master URIs regardless of which masters are available # to connect _to_. This is primarily used for masterless mode, when # we need a list of master URIs to fire calls back to. for master in opts['local_masters']: opts['master'] = master opts.update(prep_ip_port(opts)) opts['master_uri_list'].append(resolve_dns(opts)['master_uri']) while True: if attempts != 0: # Give up a little time between connection attempts # to allow the IOLoop to run any other scheduled tasks. 
yield tornado.gen.sleep(opts['acceptance_wait_time']) attempts += 1 if tries > 0: log.debug( 'Connecting to master. Attempt %s of %s', attempts, tries ) else: log.debug( 'Connecting to master. Attempt %s (infinite attempts)', attempts ) for master in opts['local_masters']: opts['master'] = master opts.update(prep_ip_port(opts)) opts.update(resolve_dns(opts)) # on first run, update self.opts with the whole master list # to enable a minion to re-use old masters if they get fixed if 'master_list' not in opts: opts['master_list'] = copy.copy(opts['local_masters']) self.opts = opts pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs) try: yield pub_channel.connect() conn = True break except SaltClientError as exc: last_exc = exc if exc.strerror.startswith('Could not access'): msg = ( 'Failed to initiate connection with Master ' '%s: check ownership/permissions. Error ' 'message: %s', opts['master'], exc ) else: msg = ('Master %s could not be reached, trying next ' 'next master (if any)', opts['master']) log.info(msg) continue if not conn: if attempts == tries: # Exhausted all attempts. Return exception. self.connected = False self.opts['master'] = copy.copy(self.opts['local_masters']) log.error( 'No master could be reached or all masters ' 'denied the minion\'s connection attempt.' ) # If the code reaches this point, 'last_exc' # should already be set. raise last_exc # pylint: disable=E0702 else: self.tok = pub_channel.auth.gen_token(b'salt') self.connected = True raise tornado.gen.Return((opts['master'], pub_channel)) # single master sign in else: if opts['random_master']: log.warning('random_master is True but there is only one master specified. Ignoring.') while True: if attempts != 0: # Give up a little time between connection attempts # to allow the IOLoop to run any other scheduled tasks. yield tornado.gen.sleep(opts['acceptance_wait_time']) attempts += 1 if tries > 0: log.debug( 'Connecting to master. 
Attempt %s of %s', attempts, tries ) else: log.debug( 'Connecting to master. Attempt %s (infinite attempts)', attempts ) opts.update(prep_ip_port(opts)) opts.update(resolve_dns(opts)) try: if self.opts['transport'] == 'detect': self.opts['detect_mode'] = True for trans in ('zeromq', 'tcp'): if trans == 'zeromq' and not zmq: continue self.opts['transport'] = trans pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs) yield pub_channel.connect() if not pub_channel.auth.authenticated: continue del self.opts['detect_mode'] break else: pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs) yield pub_channel.connect() self.tok = pub_channel.auth.gen_token(b'salt') self.connected = True raise tornado.gen.Return((opts['master'], pub_channel)) except SaltClientError as exc: if attempts == tries: # Exhausted all attempts. Return exception. self.connected = False raise exc def _return_retry_timer(self): ''' Based on the minion configuration, either return a randomized timer or just return the value of the return_retry_timer. ''' msg = 'Minion return retry timer set to %s seconds' if self.opts.get('return_retry_timer_max'): try: random_retry = randint(self.opts['return_retry_timer'], self.opts['return_retry_timer_max']) retry_msg = msg % random_retry log.debug('%s (randomized)', msg % random_retry) return random_retry except ValueError: # Catch wiseguys using negative integers here log.error( 'Invalid value (return_retry_timer: %s or ' 'return_retry_timer_max: %s). Both must be positive ' 'integers.', self.opts['return_retry_timer'], self.opts['return_retry_timer_max'], ) log.debug(msg, DEFAULT_MINION_OPTS['return_retry_timer']) return DEFAULT_MINION_OPTS['return_retry_timer'] else: log.debug(msg, self.opts.get('return_retry_timer')) return self.opts.get('return_retry_timer')
saltstack/salt
salt/minion.py
MinionBase._return_retry_timer
python
def _return_retry_timer(self): ''' Based on the minion configuration, either return a randomized timer or just return the value of the return_retry_timer. ''' msg = 'Minion return retry timer set to %s seconds' if self.opts.get('return_retry_timer_max'): try: random_retry = randint(self.opts['return_retry_timer'], self.opts['return_retry_timer_max']) retry_msg = msg % random_retry log.debug('%s (randomized)', msg % random_retry) return random_retry except ValueError: # Catch wiseguys using negative integers here log.error( 'Invalid value (return_retry_timer: %s or ' 'return_retry_timer_max: %s). Both must be positive ' 'integers.', self.opts['return_retry_timer'], self.opts['return_retry_timer_max'], ) log.debug(msg, DEFAULT_MINION_OPTS['return_retry_timer']) return DEFAULT_MINION_OPTS['return_retry_timer'] else: log.debug(msg, self.opts.get('return_retry_timer')) return self.opts.get('return_retry_timer')
Based on the minion configuration, either return a randomized timer or just return the value of the return_retry_timer.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L773-L798
null
class MinionBase(object): def __init__(self, opts): self.opts = opts @staticmethod def process_schedule(minion, loop_interval): try: if hasattr(minion, 'schedule'): minion.schedule.eval() else: log.error('Minion scheduler not initialized. Scheduled jobs will not be run.') return # Check if scheduler requires lower loop interval than # the loop_interval setting if minion.schedule.loop_interval < loop_interval: loop_interval = minion.schedule.loop_interval log.debug( 'Overriding loop_interval because of scheduled jobs.' ) except Exception as exc: log.error('Exception %s occurred in scheduled job', exc) return loop_interval def process_beacons(self, functions): ''' Evaluate all of the configured beacons, grab the config again in case the pillar or grains changed ''' if 'config.merge' in functions: b_conf = functions['config.merge']('beacons', self.opts['beacons'], omit_opts=True) if b_conf: return self.beacons.process(b_conf, self.opts['grains']) # pylint: disable=no-member return [] @tornado.gen.coroutine def eval_master(self, opts, timeout=60, safe=True, failed=False, failback=False): ''' Evaluates and returns a tuple of the current master address and the pub_channel. In standard mode, just creates a pub_channel with the given master address. With master_type=func evaluates the current master address from the given module and then creates a pub_channel. With master_type=failover takes the list of masters and loops through them. The first one that allows the minion to create a pub_channel is then returned. If this function is called outside the minions initialization phase (for example from the minions main event-loop when a master connection loss was detected), 'failed' should be set to True. The current (possibly failed) master will then be removed from the list of masters. 
''' # return early if we are not connecting to a master if opts['master_type'] == 'disable': log.warning('Master is set to disable, skipping connection') self.connected = False raise tornado.gen.Return((None, None)) # Run masters discovery over SSDP. This may modify the whole configuration, # depending of the networking and sets of masters. # if we are using multimaster, discovery can only happen at start time # because MinionManager handles it. by eval_master time the minion doesn't # know about other siblings currently running if isinstance(self.opts['discovery'], dict) and not self.opts['discovery'].get('multimaster'): self._discover_masters() # check if master_type was altered from its default if opts['master_type'] != 'str' and opts['__role'] != 'syndic': # check for a valid keyword if opts['master_type'] == 'func': eval_master_func(opts) # if failover or distributed is set, master has to be of type list elif opts['master_type'] in ('failover', 'distributed'): if isinstance(opts['master'], list): log.info( 'Got list of available master addresses: %s', opts['master'] ) if opts['master_type'] == 'distributed': master_len = len(opts['master']) if master_len > 1: secondary_masters = opts['master'][1:] master_idx = crc32(opts['id']) % master_len try: preferred_masters = opts['master'] preferred_masters[0] = opts['master'][master_idx] preferred_masters[1:] = [m for m in opts['master'] if m != preferred_masters[0]] opts['master'] = preferred_masters log.info('Distributed to the master at \'%s\'.', opts['master'][0]) except (KeyError, AttributeError, TypeError): log.warning('Failed to distribute to a specific master.') else: log.warning('master_type = distributed needs more than 1 master.') if opts['master_shuffle']: log.warning( 'Use of \'master_shuffle\' detected. \'master_shuffle\' is deprecated in favor ' 'of \'random_master\'. Please update your minion config file.' 
) opts['random_master'] = opts['master_shuffle'] opts['auth_tries'] = 0 if opts['master_failback'] and opts['master_failback_interval'] == 0: opts['master_failback_interval'] = opts['master_alive_interval'] # if opts['master'] is a str and we have never created opts['master_list'] elif isinstance(opts['master'], six.string_types) and ('master_list' not in opts): # We have a string, but a list was what was intended. Convert. # See issue 23611 for details opts['master'] = [opts['master']] elif opts['__role'] == 'syndic': log.info('Syndic setting master_syndic to \'%s\'', opts['master']) # if failed=True, the minion was previously connected # we're probably called from the minions main-event-loop # because a master connection loss was detected. remove # the possibly failed master from the list of masters. elif failed: if failback: # failback list of masters to original config opts['master'] = opts['master_list'] else: log.info( 'Moving possibly failed master %s to the end of ' 'the list of masters', opts['master'] ) if opts['master'] in opts['local_masters']: # create new list of master with the possibly failed # one moved to the end failed_master = opts['master'] opts['master'] = [x for x in opts['local_masters'] if opts['master'] != x] opts['master'].append(failed_master) else: opts['master'] = opts['master_list'] else: msg = ('master_type set to \'failover\' but \'master\' ' 'is not of type list but of type ' '{0}'.format(type(opts['master']))) log.error(msg) sys.exit(salt.defaults.exitcodes.EX_GENERIC) # If failover is set, minion have to failover on DNS errors instead of retry DNS resolve. # See issue 21082 for details if opts['retry_dns'] and opts['master_type'] == 'failover': msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. 
' 'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.') log.critical(msg) opts['retry_dns'] = 0 else: msg = ('Invalid keyword \'{0}\' for variable ' '\'master_type\''.format(opts['master_type'])) log.error(msg) sys.exit(salt.defaults.exitcodes.EX_GENERIC) # FIXME: if SMinion don't define io_loop, it can't switch master see #29088 # Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop # (The channel factories will set a default if the kwarg isn't passed) factory_kwargs = {'timeout': timeout, 'safe': safe} if getattr(self, 'io_loop', None): factory_kwargs['io_loop'] = self.io_loop # pylint: disable=no-member tries = opts.get('master_tries', 1) attempts = 0 # if we have a list of masters, loop through them and be # happy with the first one that allows us to connect if isinstance(opts['master'], list): conn = False last_exc = None opts['master_uri_list'] = [] opts['local_masters'] = copy.copy(opts['master']) # shuffle the masters and then loop through them if opts['random_master']: # master_failback is only used when master_type is set to failover if opts['master_type'] == 'failover' and opts['master_failback']: secondary_masters = opts['local_masters'][1:] shuffle(secondary_masters) opts['local_masters'][1:] = secondary_masters else: shuffle(opts['local_masters']) # This sits outside of the connection loop below because it needs to set # up a list of master URIs regardless of which masters are available # to connect _to_. This is primarily used for masterless mode, when # we need a list of master URIs to fire calls back to. for master in opts['local_masters']: opts['master'] = master opts.update(prep_ip_port(opts)) opts['master_uri_list'].append(resolve_dns(opts)['master_uri']) while True: if attempts != 0: # Give up a little time between connection attempts # to allow the IOLoop to run any other scheduled tasks. 
yield tornado.gen.sleep(opts['acceptance_wait_time']) attempts += 1 if tries > 0: log.debug( 'Connecting to master. Attempt %s of %s', attempts, tries ) else: log.debug( 'Connecting to master. Attempt %s (infinite attempts)', attempts ) for master in opts['local_masters']: opts['master'] = master opts.update(prep_ip_port(opts)) opts.update(resolve_dns(opts)) # on first run, update self.opts with the whole master list # to enable a minion to re-use old masters if they get fixed if 'master_list' not in opts: opts['master_list'] = copy.copy(opts['local_masters']) self.opts = opts pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs) try: yield pub_channel.connect() conn = True break except SaltClientError as exc: last_exc = exc if exc.strerror.startswith('Could not access'): msg = ( 'Failed to initiate connection with Master ' '%s: check ownership/permissions. Error ' 'message: %s', opts['master'], exc ) else: msg = ('Master %s could not be reached, trying next ' 'next master (if any)', opts['master']) log.info(msg) continue if not conn: if attempts == tries: # Exhausted all attempts. Return exception. self.connected = False self.opts['master'] = copy.copy(self.opts['local_masters']) log.error( 'No master could be reached or all masters ' 'denied the minion\'s connection attempt.' ) # If the code reaches this point, 'last_exc' # should already be set. raise last_exc # pylint: disable=E0702 else: self.tok = pub_channel.auth.gen_token(b'salt') self.connected = True raise tornado.gen.Return((opts['master'], pub_channel)) # single master sign in else: if opts['random_master']: log.warning('random_master is True but there is only one master specified. Ignoring.') while True: if attempts != 0: # Give up a little time between connection attempts # to allow the IOLoop to run any other scheduled tasks. yield tornado.gen.sleep(opts['acceptance_wait_time']) attempts += 1 if tries > 0: log.debug( 'Connecting to master. 
Attempt %s of %s', attempts, tries ) else: log.debug( 'Connecting to master. Attempt %s (infinite attempts)', attempts ) opts.update(prep_ip_port(opts)) opts.update(resolve_dns(opts)) try: if self.opts['transport'] == 'detect': self.opts['detect_mode'] = True for trans in ('zeromq', 'tcp'): if trans == 'zeromq' and not zmq: continue self.opts['transport'] = trans pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs) yield pub_channel.connect() if not pub_channel.auth.authenticated: continue del self.opts['detect_mode'] break else: pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs) yield pub_channel.connect() self.tok = pub_channel.auth.gen_token(b'salt') self.connected = True raise tornado.gen.Return((opts['master'], pub_channel)) except SaltClientError as exc: if attempts == tries: # Exhausted all attempts. Return exception. self.connected = False raise exc def _discover_masters(self): ''' Discover master(s) and decide where to connect, if SSDP is around. This modifies the configuration on the fly. :return: ''' if self.opts['master'] == DEFAULT_MINION_OPTS['master'] and self.opts['discovery'] is not False: master_discovery_client = salt.utils.ssdp.SSDPDiscoveryClient() masters = {} for att in range(self.opts['discovery'].get('attempts', 3)): try: att += 1 log.info('Attempting %s time(s) to discover masters', att) masters.update(master_discovery_client.discover()) if not masters: time.sleep(self.opts['discovery'].get('pause', 5)) else: break except Exception as err: log.error('SSDP discovery failure: %s', err) break if masters: policy = self.opts.get('discovery', {}).get('match', 'any') if policy not in ['any', 'all']: log.error('SSDP configuration matcher failure: unknown value "%s". 
' 'Should be "any" or "all"', policy) return mapping = self.opts['discovery'].get('mapping', {}) discovered = [] for addr, mappings in masters.items(): for proto_data in mappings: cnt = len([key for key, value in mapping.items() if proto_data.get('mapping', {}).get(key) == value]) if policy == 'any' and bool(cnt) or cnt == len(mapping): if self.opts['discovery'].get('multimaster'): discovered.append(proto_data['master']) else: self.opts['master'] = proto_data['master'] return self.opts['master'] = discovered
saltstack/salt
salt/minion.py
SMinion.gen_modules
python
def gen_modules(self, initial_load=False): ''' Tell the minion to reload the execution modules CLI Example: .. code-block:: bash salt '*' sys.reload_modules ''' self.opts['pillar'] = salt.pillar.get_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ).compile_pillar() self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils) self.serializers = salt.loader.serializers(self.opts) self.returners = salt.loader.returners(self.opts, self.functions) self.proxy = salt.loader.proxy(self.opts, self.functions, self.returners, None) # TODO: remove self.function_errors = {} # Keep the funcs clean self.states = salt.loader.states(self.opts, self.functions, self.utils, self.serializers) self.rend = salt.loader.render(self.opts, self.functions) # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.functions['sys.reload_modules'] = self.gen_modules self.executors = salt.loader.executors(self.opts)
Tell the minion to reload the execution modules CLI Example: .. code-block:: bash salt '*' sys.reload_modules
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L847-L880
[ "def get_pillar(opts, grains, minion_id, saltenv=None, ext=None, funcs=None,\n pillar_override=None, pillarenv=None, extra_minion_data=None):\n '''\n Return the correct pillar driver based on the file_client option\n '''\n file_client = opts['file_client']\n if opts.get('master_type') == 'disable' and file_client == 'remote':\n file_client = 'local'\n ptype = {\n 'remote': RemotePillar,\n 'local': Pillar\n }.get(file_client, Pillar)\n # If local pillar and we're caching, run through the cache system first\n log.debug('Determining pillar cache')\n if opts['pillar_cache']:\n log.info('Compiling pillar from cache')\n log.debug('get_pillar using pillar cache with ext: %s', ext)\n return PillarCache(opts, grains, minion_id, saltenv, ext=ext, functions=funcs,\n pillar_override=pillar_override, pillarenv=pillarenv)\n return ptype(opts, grains, minion_id, saltenv, ext, functions=funcs,\n pillar_override=pillar_override, pillarenv=pillarenv,\n extra_minion_data=extra_minion_data)\n", "def compile_pillar(self, *args, **kwargs): # Will likely just be pillar_dirs\n '''\n Compile pillar and set it to the cache, if not found.\n\n :param args:\n :param kwargs:\n :return:\n '''\n log.debug('Scanning pillar cache for information about minion %s and pillarenv %s', self.minion_id, self.pillarenv)\n log.debug('Scanning cache for minion %s: %s', self.minion_id, self.cache[self.minion_id] or '*empty*')\n\n # Check the cache!\n if self.minion_id in self.cache: # Keyed by minion_id\n # TODO Compare grains, etc?\n if self.pillarenv in self.cache[self.minion_id]:\n # We have a cache hit! Send it back.\n log.debug('Pillar cache hit for minion %s and pillarenv %s', self.minion_id, self.pillarenv)\n pillar_data = self.cache[self.minion_id][self.pillarenv]\n else:\n # We found the minion but not the env. 
Store it.\n pillar_data = self.fetch_pillar()\n self.cache[self.minion_id][self.pillarenv] = pillar_data\n self.cache.store()\n log.debug('Pillar cache miss for pillarenv %s for minion %s', self.pillarenv, self.minion_id)\n else:\n # We haven't seen this minion yet in the cache. Store it.\n pillar_data = self.fetch_pillar()\n self.cache[self.minion_id] = {self.pillarenv: pillar_data}\n log.debug('Pillar cache has been added for minion %s', self.minion_id)\n log.debug('Current pillar cache: %s', self.cache[self.minion_id])\n\n # we dont want the pillar_override baked into the cached fetch_pillar from above\n if self.pillar_override:\n pillar_data = merge(\n pillar_data,\n self.pillar_override,\n self.opts.get('pillar_source_merging_strategy', 'smart'),\n self.opts.get('renderer', 'yaml'),\n self.opts.get('pillar_merge_lists', False))\n pillar_data.update(self.pillar_override)\n\n return pillar_data\n" ]
class SMinion(MinionBase): ''' Create an object that has loaded all of the minion module functions, grains, modules, returners etc. The SMinion allows developers to generate all of the salt minion functions and present them with these functions for general use. ''' def __init__(self, opts): # Late setup of the opts grains, so we can log from the grains module import salt.loader opts['grains'] = salt.loader.grains(opts) super(SMinion, self).__init__(opts) # run ssdp discovery if necessary self._discover_masters() # Clean out the proc directory (default /var/cache/salt/minion/proc) if (self.opts.get('file_client', 'remote') == 'remote' or self.opts.get('use_master_when_local', False)): install_zmq() io_loop = ZMQDefaultLoop.current() io_loop.run_sync( lambda: self.eval_master(self.opts, failed=True) ) self.gen_modules(initial_load=True) # If configured, cache pillar data on the minion if self.opts['file_client'] == 'remote' and self.opts.get('minion_pillar_cache', False): import salt.utils.yaml pdir = os.path.join(self.opts['cachedir'], 'pillar') if not os.path.isdir(pdir): os.makedirs(pdir, 0o700) ptop = os.path.join(pdir, 'top.sls') if self.opts['saltenv'] is not None: penv = self.opts['saltenv'] else: penv = 'base' cache_top = {penv: {self.opts['id']: ['cache']}} with salt.utils.files.fopen(ptop, 'wb') as fp_: salt.utils.yaml.safe_dump(cache_top, fp_) os.chmod(ptop, 0o600) cache_sls = os.path.join(pdir, 'cache.sls') with salt.utils.files.fopen(cache_sls, 'wb') as fp_: salt.utils.yaml.safe_dump(self.opts['pillar'], fp_) os.chmod(cache_sls, 0o600)
saltstack/salt
salt/minion.py
MasterMinion.gen_modules
python
def gen_modules(self, initial_load=False): ''' Tell the minion to reload the execution modules CLI Example: .. code-block:: bash salt '*' sys.reload_modules ''' self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods( self.opts, utils=self.utils, whitelist=self.whitelist, initial_load=initial_load) self.serializers = salt.loader.serializers(self.opts) if self.mk_returners: self.returners = salt.loader.returners(self.opts, self.functions) if self.mk_states: self.states = salt.loader.states(self.opts, self.functions, self.utils, self.serializers) if self.mk_rend: self.rend = salt.loader.render(self.opts, self.functions) if self.mk_matcher: self.matchers = salt.loader.matchers(self.opts) self.functions['sys.reload_modules'] = self.gen_modules
Tell the minion to reload the execution modules CLI Example: .. code-block:: bash salt '*' sys.reload_modules
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L913-L941
null
class MasterMinion(object): ''' Create a fully loaded minion function object for generic use on the master. What makes this class different is that the pillar is omitted, otherwise everything else is loaded cleanly. ''' def __init__( self, opts, returners=True, states=True, rend=True, matcher=True, whitelist=None, ignore_config_errors=True): self.opts = salt.config.minion_config( opts['conf_file'], ignore_config_errors=ignore_config_errors, role='master' ) self.opts.update(opts) self.whitelist = whitelist self.opts['grains'] = salt.loader.grains(opts) self.opts['pillar'] = {} self.mk_returners = returners self.mk_states = states self.mk_rend = rend self.mk_matcher = matcher self.gen_modules(initial_load=True)
saltstack/salt
salt/minion.py
MinionManager._create_minion_object
python
def _create_minion_object(self, opts, timeout, safe, io_loop=None, loaded_base_name=None, jid_queue=None): ''' Helper function to return the correct type of object ''' return Minion(opts, timeout, safe, io_loop=io_loop, loaded_base_name=loaded_base_name, jid_queue=jid_queue)
Helper function to return the correct type of object
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L979-L990
null
class MinionManager(MinionBase): ''' Create a multi minion interface, this creates as many minions as are defined in the master option and binds each minion object to a respective master. ''' def __init__(self, opts): super(MinionManager, self).__init__(opts) self.auth_wait = self.opts['acceptance_wait_time'] self.max_auth_wait = self.opts['acceptance_wait_time_max'] self.minions = [] self.jid_queue = [] install_zmq() self.io_loop = ZMQDefaultLoop.current() self.process_manager = ProcessManager(name='MultiMinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # Tornado backward compat def __del__(self): self.destroy() def _bind(self): # start up the event publisher, so we can see events during startup self.event_publisher = salt.utils.event.AsyncEventPublisher( self.opts, io_loop=self.io_loop, ) self.event = salt.utils.event.get_event('minion', opts=self.opts, io_loop=self.io_loop) self.event.subscribe('') self.event.set_event_handler(self.handle_event) @tornado.gen.coroutine def handle_event(self, package): yield [minion.handle_event(package) for minion in self.minions] def _check_minions(self): ''' Check the size of self.minions and raise an error if it's empty ''' if not self.minions: err = ('Minion unable to successfully connect to ' 'a Salt Master.') log.error(err) def _spawn_minions(self, timeout=60): ''' Spawn all the coroutines which will sign in to masters ''' # Run masters discovery over SSDP. This may modify the whole configuration, # depending of the networking and sets of masters. 
If match is 'any' we let # eval_master handle the discovery instead so disconnections can also handle # discovery if isinstance(self.opts['discovery'], dict) and self.opts['discovery'].get('multimaster'): self._discover_masters() masters = self.opts['master'] if (self.opts['master_type'] in ('failover', 'distributed')) or not isinstance(self.opts['master'], list): masters = [masters] for master in masters: s_opts = copy.deepcopy(self.opts) s_opts['master'] = master s_opts['multimaster'] = True minion = self._create_minion_object(s_opts, s_opts['auth_timeout'], False, io_loop=self.io_loop, loaded_base_name='salt.loader.{0}'.format(s_opts['master']), jid_queue=self.jid_queue) self.io_loop.spawn_callback(self._connect_minion, minion) self.io_loop.call_later(timeout, self._check_minions) @tornado.gen.coroutine def _connect_minion(self, minion): ''' Create a minion, and asynchronously connect it to a master ''' auth_wait = minion.opts['acceptance_wait_time'] failed = False while True: if failed: if auth_wait < self.max_auth_wait: auth_wait += self.auth_wait log.debug( "sleeping before reconnect attempt to %s [%d/%d]", minion.opts['master'], auth_wait, self.max_auth_wait, ) yield tornado.gen.sleep(auth_wait) # TODO: log? try: if minion.opts.get('beacons_before_connect', False): minion.setup_beacons(before_connect=True) if minion.opts.get('scheduler_before_connect', False): minion.setup_scheduler(before_connect=True) yield minion.connect_master(failed=failed) minion.tune_in(start=False) self.minions.append(minion) break except SaltClientError as exc: failed = True log.error( 'Error while bringing up minion for multi-master. Is ' 'master at %s responding?', minion.opts['master'] ) except SaltMasterUnresolvableError: err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. 
' \ 'Set \'master\' value in minion config.'.format(minion.opts['master']) log.error(err) break except Exception as e: failed = True log.critical( 'Unexpected error while connecting to %s', minion.opts['master'], exc_info=True ) # Multi Master Tune In def tune_in(self): ''' Bind to the masters This loop will attempt to create connections to masters it hasn't connected to yet, but once the initial connection is made it is up to ZMQ to do the reconnect (don't know of an API to get the state here in salt) ''' self._bind() # Fire off all the minion coroutines self._spawn_minions() # serve forever! self.io_loop.start() @property def restart(self): for minion in self.minions: if minion.restart: return True return False def stop(self, signum): for minion in self.minions: minion.process_manager.stop_restarting() minion.process_manager.send_signal_to_processes(signum) # kill any remaining processes minion.process_manager.kill_children() minion.destroy() def destroy(self): for minion in self.minions: minion.destroy()
saltstack/salt
salt/minion.py
MinionManager._spawn_minions
python
def _spawn_minions(self, timeout=60): ''' Spawn all the coroutines which will sign in to masters ''' # Run masters discovery over SSDP. This may modify the whole configuration, # depending of the networking and sets of masters. If match is 'any' we let # eval_master handle the discovery instead so disconnections can also handle # discovery if isinstance(self.opts['discovery'], dict) and self.opts['discovery'].get('multimaster'): self._discover_masters() masters = self.opts['master'] if (self.opts['master_type'] in ('failover', 'distributed')) or not isinstance(self.opts['master'], list): masters = [masters] for master in masters: s_opts = copy.deepcopy(self.opts) s_opts['master'] = master s_opts['multimaster'] = True minion = self._create_minion_object(s_opts, s_opts['auth_timeout'], False, io_loop=self.io_loop, loaded_base_name='salt.loader.{0}'.format(s_opts['master']), jid_queue=self.jid_queue) self.io_loop.spawn_callback(self._connect_minion, minion) self.io_loop.call_later(timeout, self._check_minions)
Spawn all the coroutines which will sign in to masters
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L1001-L1027
[ "def _create_minion_object(self, opts, timeout, safe,\n io_loop=None, loaded_base_name=None,\n jid_queue=None):\n '''\n Helper function to return the correct type of object\n '''\n return Minion(opts,\n timeout,\n safe,\n io_loop=io_loop,\n loaded_base_name=loaded_base_name,\n jid_queue=jid_queue)\n" ]
class MinionManager(MinionBase): ''' Create a multi minion interface, this creates as many minions as are defined in the master option and binds each minion object to a respective master. ''' def __init__(self, opts): super(MinionManager, self).__init__(opts) self.auth_wait = self.opts['acceptance_wait_time'] self.max_auth_wait = self.opts['acceptance_wait_time_max'] self.minions = [] self.jid_queue = [] install_zmq() self.io_loop = ZMQDefaultLoop.current() self.process_manager = ProcessManager(name='MultiMinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # Tornado backward compat def __del__(self): self.destroy() def _bind(self): # start up the event publisher, so we can see events during startup self.event_publisher = salt.utils.event.AsyncEventPublisher( self.opts, io_loop=self.io_loop, ) self.event = salt.utils.event.get_event('minion', opts=self.opts, io_loop=self.io_loop) self.event.subscribe('') self.event.set_event_handler(self.handle_event) @tornado.gen.coroutine def handle_event(self, package): yield [minion.handle_event(package) for minion in self.minions] def _create_minion_object(self, opts, timeout, safe, io_loop=None, loaded_base_name=None, jid_queue=None): ''' Helper function to return the correct type of object ''' return Minion(opts, timeout, safe, io_loop=io_loop, loaded_base_name=loaded_base_name, jid_queue=jid_queue) def _check_minions(self): ''' Check the size of self.minions and raise an error if it's empty ''' if not self.minions: err = ('Minion unable to successfully connect to ' 'a Salt Master.') log.error(err) @tornado.gen.coroutine def _connect_minion(self, minion): ''' Create a minion, and asynchronously connect it to a master ''' auth_wait = minion.opts['acceptance_wait_time'] failed = False while True: if failed: if auth_wait < self.max_auth_wait: auth_wait += self.auth_wait log.debug( "sleeping before reconnect attempt to %s [%d/%d]", minion.opts['master'], auth_wait, 
self.max_auth_wait, ) yield tornado.gen.sleep(auth_wait) # TODO: log? try: if minion.opts.get('beacons_before_connect', False): minion.setup_beacons(before_connect=True) if minion.opts.get('scheduler_before_connect', False): minion.setup_scheduler(before_connect=True) yield minion.connect_master(failed=failed) minion.tune_in(start=False) self.minions.append(minion) break except SaltClientError as exc: failed = True log.error( 'Error while bringing up minion for multi-master. Is ' 'master at %s responding?', minion.opts['master'] ) except SaltMasterUnresolvableError: err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \ 'Set \'master\' value in minion config.'.format(minion.opts['master']) log.error(err) break except Exception as e: failed = True log.critical( 'Unexpected error while connecting to %s', minion.opts['master'], exc_info=True ) # Multi Master Tune In def tune_in(self): ''' Bind to the masters This loop will attempt to create connections to masters it hasn't connected to yet, but once the initial connection is made it is up to ZMQ to do the reconnect (don't know of an API to get the state here in salt) ''' self._bind() # Fire off all the minion coroutines self._spawn_minions() # serve forever! self.io_loop.start() @property def restart(self): for minion in self.minions: if minion.restart: return True return False def stop(self, signum): for minion in self.minions: minion.process_manager.stop_restarting() minion.process_manager.send_signal_to_processes(signum) # kill any remaining processes minion.process_manager.kill_children() minion.destroy() def destroy(self): for minion in self.minions: minion.destroy()
saltstack/salt
salt/minion.py
MinionManager._connect_minion
python
def _connect_minion(self, minion): ''' Create a minion, and asynchronously connect it to a master ''' auth_wait = minion.opts['acceptance_wait_time'] failed = False while True: if failed: if auth_wait < self.max_auth_wait: auth_wait += self.auth_wait log.debug( "sleeping before reconnect attempt to %s [%d/%d]", minion.opts['master'], auth_wait, self.max_auth_wait, ) yield tornado.gen.sleep(auth_wait) # TODO: log? try: if minion.opts.get('beacons_before_connect', False): minion.setup_beacons(before_connect=True) if minion.opts.get('scheduler_before_connect', False): minion.setup_scheduler(before_connect=True) yield minion.connect_master(failed=failed) minion.tune_in(start=False) self.minions.append(minion) break except SaltClientError as exc: failed = True log.error( 'Error while bringing up minion for multi-master. Is ' 'master at %s responding?', minion.opts['master'] ) except SaltMasterUnresolvableError: err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolveable address. ' \ 'Set \'master\' value in minion config.'.format(minion.opts['master']) log.error(err) break except Exception as e: failed = True log.critical( 'Unexpected error while connecting to %s', minion.opts['master'], exc_info=True )
Create a minion, and asynchronously connect it to a master
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L1030-L1072
null
class MinionManager(MinionBase): ''' Create a multi minion interface, this creates as many minions as are defined in the master option and binds each minion object to a respective master. ''' def __init__(self, opts): super(MinionManager, self).__init__(opts) self.auth_wait = self.opts['acceptance_wait_time'] self.max_auth_wait = self.opts['acceptance_wait_time_max'] self.minions = [] self.jid_queue = [] install_zmq() self.io_loop = ZMQDefaultLoop.current() self.process_manager = ProcessManager(name='MultiMinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # Tornado backward compat def __del__(self): self.destroy() def _bind(self): # start up the event publisher, so we can see events during startup self.event_publisher = salt.utils.event.AsyncEventPublisher( self.opts, io_loop=self.io_loop, ) self.event = salt.utils.event.get_event('minion', opts=self.opts, io_loop=self.io_loop) self.event.subscribe('') self.event.set_event_handler(self.handle_event) @tornado.gen.coroutine def handle_event(self, package): yield [minion.handle_event(package) for minion in self.minions] def _create_minion_object(self, opts, timeout, safe, io_loop=None, loaded_base_name=None, jid_queue=None): ''' Helper function to return the correct type of object ''' return Minion(opts, timeout, safe, io_loop=io_loop, loaded_base_name=loaded_base_name, jid_queue=jid_queue) def _check_minions(self): ''' Check the size of self.minions and raise an error if it's empty ''' if not self.minions: err = ('Minion unable to successfully connect to ' 'a Salt Master.') log.error(err) def _spawn_minions(self, timeout=60): ''' Spawn all the coroutines which will sign in to masters ''' # Run masters discovery over SSDP. This may modify the whole configuration, # depending of the networking and sets of masters. 
If match is 'any' we let # eval_master handle the discovery instead so disconnections can also handle # discovery if isinstance(self.opts['discovery'], dict) and self.opts['discovery'].get('multimaster'): self._discover_masters() masters = self.opts['master'] if (self.opts['master_type'] in ('failover', 'distributed')) or not isinstance(self.opts['master'], list): masters = [masters] for master in masters: s_opts = copy.deepcopy(self.opts) s_opts['master'] = master s_opts['multimaster'] = True minion = self._create_minion_object(s_opts, s_opts['auth_timeout'], False, io_loop=self.io_loop, loaded_base_name='salt.loader.{0}'.format(s_opts['master']), jid_queue=self.jid_queue) self.io_loop.spawn_callback(self._connect_minion, minion) self.io_loop.call_later(timeout, self._check_minions) @tornado.gen.coroutine # Multi Master Tune In def tune_in(self): ''' Bind to the masters This loop will attempt to create connections to masters it hasn't connected to yet, but once the initial connection is made it is up to ZMQ to do the reconnect (don't know of an API to get the state here in salt) ''' self._bind() # Fire off all the minion coroutines self._spawn_minions() # serve forever! self.io_loop.start() @property def restart(self): for minion in self.minions: if minion.restart: return True return False def stop(self, signum): for minion in self.minions: minion.process_manager.stop_restarting() minion.process_manager.send_signal_to_processes(signum) # kill any remaining processes minion.process_manager.kill_children() minion.destroy() def destroy(self): for minion in self.minions: minion.destroy()
saltstack/salt
salt/minion.py
Minion.sync_connect_master
python
def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master')
Block until we are connected to a master
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L1209-L1238
[ "def destroy(self):\n '''\n Tear down the minion\n '''\n if self._running is False:\n return\n\n self._running = False\n if hasattr(self, 'schedule'):\n del self.schedule\n if hasattr(self, 'pub_channel') and self.pub_channel is not None:\n self.pub_channel.on_recv(None)\n if hasattr(self.pub_channel, 'close'):\n self.pub_channel.close()\n del self.pub_channel\n if hasattr(self, 'periodic_callbacks'):\n for cb in six.itervalues(self.periodic_callbacks):\n cb.stop()\n" ]
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. 
(since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': 
self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory 
because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) executors = salt.loader.executors(opts, functions, proxy=proxy) if opt_in: self.opts = opts return functions, returners, errors, executors def _send_req_sync(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.ReqChannel.factory(self.opts) try: return channel.send(load, timeout=timeout) finally: channel.close() @tornado.gen.coroutine def _send_req_async(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.AsyncReqChannel.factory(self.opts) try: ret = yield channel.send(load, timeout=timeout) raise 
tornado.gen.Return(ret) finally: channel.close() def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. ''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return if sync: try: self._send_req_sync(load, timeout) except salt.exceptions.SaltReqTimeoutError: log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return False except Exception: log.info('fire_master failed: %s', traceback.format_exc()) return False else: if timeout_handler is None: def handle_timeout(*_): log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return True timeout_handler = handle_timeout with tornado.stack_context.ExceptionStackContext(timeout_handler): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # Ensure payload is unicode. Disregard failure to decode binary blobs. 
if six.PY2: data = salt.utils.data.decode(data, keep=True) if 'user' in data: log.info( 'User %s Executing command %s with jid %s', data['user'], data['fun'], data['jid'] ) else: log.info( 'Executing command %s with jid %s', data['fun'], data['jid'] ) log.debug('Command details %s', data) # Don't duplicate jobs log.trace('Started JIDs: %s', self.jid_queue) if self.jid_queue is not None: if data['jid'] in self.jid_queue: return else: self.jid_queue.append(data['jid']) if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']: self.jid_queue.pop(0) if isinstance(data['fun'], six.string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners process_count_max = self.opts.get('process_count_max') process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs') if process_count_max > 0: process_count = len(salt.utils.minion.running(self.opts)) while process_count >= process_count_max: log.warning('Maximum number of processes (%s) reached while ' 'executing jid %s, waiting %s seconds...', process_count_max, data['jid'], process_count_max_sleep_secs) yield tornado.gen.sleep(process_count_max_sleep_secs) process_count = len(salt.utils.minion.running(self.opts)) # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. 
instance = self multiprocessing_enabled = self.opts.get('multiprocessing', True) if multiprocessing_enabled: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None with default_signals(signal.SIGINT, signal.SIGTERM): process = SignalHandlingMultiprocessingProcess( target=self._target, args=(instance, self.opts, data, self.connected) ) else: process = threading.Thread( target=self._target, args=(instance, self.opts, data, self.connected), name=data['jid'] ) if multiprocessing_enabled: with default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers process.start() else: process.start() # TODO: remove the windows specific check? if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not 
hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or 
allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
try: func_result = all(return_data.get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret['retcode'] = retcode ret['success'] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = 'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( 'A command in \'%s\' had a problem: %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( 'Problem executing \'%s\': %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing \'{0}\': {1}'.format( function_name, exc ) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format( function_name, exc, func.__doc__ or '' ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string(function_name) ret['return'] = docs else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = 
function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: \'{0}\''.format( minion_instance.function_errors[mod_name] ) ret['success'] = False ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. Ignoring.') if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) # Add default returners from minion config # Should have been coverted to comma-delimited string already if isinstance(opts.get('return'), six.string_types): if data['ret']: data['ret'] = ','.join((data['ret'], opts['return'])) else: data['ret'] = opts['return'] log.debug('minion return: %s', ret) # TODO: make a list? Seems odd to split it this late :/ if data['ret'] and isinstance(data['ret'], six.string_types): if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: returner_str = '{0}.returner'.format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string(returner_str) log.error( 'Returner %s could not be loaded: %s', returner_str, returner_err ) except Exception as exc: log.exception( 'The return failed for job %s: %s', data['jid'], exc ) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. 
''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID %s', sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) multifunc_ordered = opts.get('multifunc_ordered', False) num_funcs = len(data['fun']) if multifunc_ordered: ret = { 'return': [None] * num_funcs, 'retcode': [None] * num_funcs, 'success': [False] * num_funcs } else: ret = { 'return': {}, 'retcode': {}, 'success': {} } for ind in range(0, num_funcs): if not multifunc_ordered: ret['success'][data['fun'][ind]] = False try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True elif minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. 
Only ' 'saltutil.refresh_pillar allowed in blackout mode.') func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) minion_instance.functions.pack['__context__']['retcode'] = 0 key = ind if multifunc_ordered else data['fun'][ind] ret['return'][key] = func(*args, **kwargs) retcode = minion_instance.functions.pack['__context__'].get( 'retcode', 0 ) if retcode == 0: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. try: func_result = all(ret['return'][key].get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = 1 ret['retcode'][key] = retcode ret['success'][key] = retcode == 0 except Exception as exc: trb = traceback.format_exc() log.warning('The minion function caused an exception: %s', exc) if multifunc_ordered: ret['return'][ind] = trb else: ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job %s: %s', data['jid'], exc ) def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file 
is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled if ret['jid'] == 'req': ret['jid'] = salt.utils.jid.gen_jid(self.opts) salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) if not self.opts['pub_ret']: return '' def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. 
If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' if not isinstance(rets, list): rets = [rets] jids = {} for ret in rets: jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) load = jids.setdefault(jid, {}) if ret_cmd == '_syndic_return': if not load: load.update({'id': self.opts['id'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__'), 'return': {}}) if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load.update({'id': self.opts['id']}) for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. 
This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled salt.utils.minion.cache_jobs(self.opts, load['jid'], ret) load = {'cmd': ret_cmd, 'load': list(six.itervalues(jids))} def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' 
) else: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data) def _refresh_grains_watcher(self, refresh_interval_in_minutes): ''' Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: :return: None ''' if '__update_grains' not in self.opts.get('schedule', {}): if 'schedule' not in self.opts: self.opts['schedule'] = {} self.opts['schedule'].update({ '__update_grains': { 'function': 'event.fire', 'args': [{}, 'grains_refresh'], 'minutes': refresh_interval_in_minutes } }) def _fire_master_minion_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to False in Sodium release. self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'minion_start' ) # send name spaced event self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'minion'), ) def module_refresh(self, force_refresh=False, notify=False): ''' Refresh the functions and returners. ''' log.debug('Refreshing modules. Notify=%s', notify) self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify) self.schedule.functions = self.functions self.schedule.returners = self.returners def beacons_refresh(self): ''' Refresh the functions and returners. 
''' log.debug('Refreshing beacons.') self.beacons = salt.beacons.Beacon(self.opts, self.functions) def matchers_refresh(self): ''' Refresh the matchers ''' log.debug('Refreshing matchers.') self.matchers = salt.loader.matchers(self.opts) # TODO: only allow one future in flight at a time? @tornado.gen.coroutine def pillar_refresh(self, force_refresh=False, notify=False): ''' Refresh the pillar ''' if self.connected: log.debug('Refreshing pillar. Notify: %s', notify) async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ) try: self.opts['pillar'] = yield async_pillar.compile_pillar() if notify: evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE) except SaltClientError: # Do not exit if a pillar refresh fails. log.error('Pillar data could not be refreshed. ' 'One or more masters may be down!') finally: async_pillar.destroy() self.module_refresh(force_refresh, notify) self.matchers_refresh() self.beacons_refresh() def manage_schedule(self, tag, data): ''' Refresh the functions and returners. 
''' func = data.get('func', None) name = data.get('name', None) schedule = data.get('schedule', None) where = data.get('where', None) persist = data.get('persist', None) funcs = {'delete': ('delete_job', (name, persist)), 'add': ('add_job', (schedule, persist)), 'modify': ('modify_job', (name, schedule, persist)), 'enable': ('enable_schedule', ()), 'disable': ('disable_schedule', ()), 'enable_job': ('enable_job', (name, persist)), 'disable_job': ('disable_job', (name, persist)), 'postpone_job': ('postpone_job', (name, data)), 'skip_job': ('skip_job', (name, data)), 'reload': ('reload', (schedule,)), 'list': ('list', (where,)), 'save_schedule': ('save_schedule', ()), 'get_next_fire_time': ('get_next_fire_time', (name,))} # Call the appropriate schedule function try: alias, params = funcs.get(func) getattr(self.schedule, alias)(*params) except TypeError: log.error('Function "%s" is unavailable in salt.utils.scheduler', func) def manage_beacons(self, tag, data): ''' Manage Beacons ''' func = data.get('func', None) name = data.get('name', None) beacon_data = data.get('beacon_data', None) include_pillar = data.get('include_pillar', None) include_opts = data.get('include_opts', None) funcs = {'add': ('add_beacon', (name, beacon_data)), 'modify': ('modify_beacon', (name, beacon_data)), 'delete': ('delete_beacon', (name,)), 'enable': ('enable_beacons', ()), 'disable': ('disable_beacons', ()), 'enable_beacon': ('enable_beacon', (name,)), 'disable_beacon': ('disable_beacon', (name,)), 'list': ('list_beacons', (include_opts, include_pillar)), 'list_available': ('list_available_beacons', ()), 'validate_beacon': ('validate_beacon', (name, beacon_data)), 'reset': ('reset', ())} # Call the appropriate beacon function try: alias, params = funcs.get(func) getattr(self.beacons, alias)(*params) except AttributeError: log.error('Function "%s" is unavailable in salt.beacons', func) except TypeError as exc: log.info( 'Failed to handle %s with data(%s). 
Error: %s', tag, data, exc, exc_info_on_loglevel=logging.DEBUG ) def environ_setenv(self, tag, data): ''' Set the salt-minion main process environment according to the data contained in the minion event data ''' environ = data.get('environ', None) if environ is None: return False false_unsets = data.get('false_unsets', False) clear_all = data.get('clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all) def _pre_tune(self): ''' Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running ''' if self._running is None: self._running = True elif self._running is False: log.error( 'This %s was scheduled to stop. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return elif self._running is True: log.error( 'This %s is already running. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return try: log.info( '%s is starting as user \'%s\'', self.__class__.__name__, salt.utils.user.get_user() ) except Exception as err: # Only windows is allowed to fail here. See #3189. Log as debug in # that case. Else, error. 
log.log( salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR, 'Failed to get the user who is starting %s', self.__class__.__name__, exc_info=err ) def _mine_send(self, tag, data): ''' Send mine data to the master ''' channel = salt.transport.client.ReqChannel.factory(self.opts) data['tok'] = self.tok try: ret = channel.send(data) return ret except SaltReqTimeoutError: log.warning('Unable to send mine data to master.') return None finally: channel.close() def _handle_tag_module_refresh(self, tag, data): ''' Handle a module_refresh event ''' self.module_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) @tornado.gen.coroutine def _handle_tag_pillar_refresh(self, tag, data): ''' Handle a pillar_refresh event ''' yield self.pillar_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) def _handle_tag_beacons_refresh(self, tag, data): ''' Handle a beacon_refresh event ''' self.beacons_refresh() def _handle_tag_matchers_refresh(self, tag, data): ''' Handle a matchers_refresh event ''' self.matchers_refresh() def _handle_tag_manage_schedule(self, tag, data): ''' Handle a manage_schedule event ''' self.manage_schedule(tag, data) def _handle_tag_manage_beacons(self, tag, data): ''' Handle a manage_beacons event ''' self.manage_beacons(tag, data) def _handle_tag_grains_refresh(self, tag, data): ''' Handle a grains_refresh event ''' if (data.get('force_refresh', False) or self.grains_cache != self.opts['grains']): self.pillar_refresh(force_refresh=True) self.grains_cache = self.opts['grains'] def _handle_tag_environ_setenv(self, tag, data): ''' Handle a environ_setenv event ''' self.environ_setenv(tag, data) def _handle_tag_minion_mine(self, tag, data): ''' Handle a _minion_mine event ''' self._mine_send(tag, data) def _handle_tag_fire_master(self, tag, data): ''' Handle a fire_master event ''' if self.connected: log.debug('Forwarding master event tag=%s', data['tag']) 
self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) def _handle_tag_master_disconnected_failback(self, tag, data): ''' Handle a master_disconnected_failback event ''' # if the master disconnect event is for a different master, raise an exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 
'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = 
True self.io_loop.stop() def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) def _handle_tag_salt_auth_creds(self, tag, data): ''' Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def handle_event(self, package): 
''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. ''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion.connect_master
python
def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master)
Return a future which will complete when you are connected to a master
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L1241-L1246
null
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, 
self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' 
mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! 
if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) executors = salt.loader.executors(opts, functions, proxy=proxy) if opt_in: self.opts = opts return functions, returners, errors, executors def _send_req_sync(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.ReqChannel.factory(self.opts) try: return channel.send(load, timeout=timeout) finally: channel.close() @tornado.gen.coroutine def _send_req_async(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.AsyncReqChannel.factory(self.opts) try: ret = yield channel.send(load, timeout=timeout) raise tornado.gen.Return(ret) finally: channel.close() def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. ''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return if sync: try: self._send_req_sync(load, timeout) except salt.exceptions.SaltReqTimeoutError: log.info('fire_master failed: master could not be contacted. 
Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return False except Exception: log.info('fire_master failed: %s', traceback.format_exc()) return False else: if timeout_handler is None: def handle_timeout(*_): log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return True timeout_handler = handle_timeout with tornado.stack_context.ExceptionStackContext(timeout_handler): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # Ensure payload is unicode. Disregard failure to decode binary blobs. if six.PY2: data = salt.utils.data.decode(data, keep=True) if 'user' in data: log.info( 'User %s Executing command %s with jid %s', data['user'], data['fun'], data['jid'] ) else: log.info( 'Executing command %s with jid %s', data['fun'], data['jid'] ) log.debug('Command details %s', data) # Don't duplicate jobs log.trace('Started JIDs: %s', self.jid_queue) if self.jid_queue is not None: if data['jid'] in self.jid_queue: return else: self.jid_queue.append(data['jid']) if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']: self.jid_queue.pop(0) if isinstance(data['fun'], six.string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners process_count_max = self.opts.get('process_count_max') process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs') if process_count_max > 0: process_count = len(salt.utils.minion.running(self.opts)) while 
process_count >= process_count_max: log.warning('Maximum number of processes (%s) reached while ' 'executing jid %s, waiting %s seconds...', process_count_max, data['jid'], process_count_max_sleep_secs) yield tornado.gen.sleep(process_count_max_sleep_secs) process_count = len(salt.utils.minion.running(self.opts)) # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. instance = self multiprocessing_enabled = self.opts.get('multiprocessing', True) if multiprocessing_enabled: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None with default_signals(signal.SIGINT, signal.SIGTERM): process = SignalHandlingMultiprocessingProcess( target=self._target, args=(instance, self.opts, data, self.connected) ) else: process = threading.Thread( target=self._target, args=(instance, self.opts, data, self.connected), name=data['jid'] ) if multiprocessing_enabled: with default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers process.start() else: process.start() # TODO: remove the windows specific check? 
if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading 
target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. 
Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
try: func_result = all(return_data.get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret['retcode'] = retcode ret['success'] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = 'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( 'A command in \'%s\' had a problem: %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( 'Problem executing \'%s\': %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing \'{0}\': {1}'.format( function_name, exc ) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format( function_name, exc, func.__doc__ or '' ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string(function_name) ret['return'] = docs else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = 
function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: \'{0}\''.format( minion_instance.function_errors[mod_name] ) ret['success'] = False ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. Ignoring.') if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) # Add default returners from minion config # Should have been coverted to comma-delimited string already if isinstance(opts.get('return'), six.string_types): if data['ret']: data['ret'] = ','.join((data['ret'], opts['return'])) else: data['ret'] = opts['return'] log.debug('minion return: %s', ret) # TODO: make a list? Seems odd to split it this late :/ if data['ret'] and isinstance(data['ret'], six.string_types): if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: returner_str = '{0}.returner'.format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string(returner_str) log.error( 'Returner %s could not be loaded: %s', returner_str, returner_err ) except Exception as exc: log.exception( 'The return failed for job %s: %s', data['jid'], exc ) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. 
''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID %s', sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) multifunc_ordered = opts.get('multifunc_ordered', False) num_funcs = len(data['fun']) if multifunc_ordered: ret = { 'return': [None] * num_funcs, 'retcode': [None] * num_funcs, 'success': [False] * num_funcs } else: ret = { 'return': {}, 'retcode': {}, 'success': {} } for ind in range(0, num_funcs): if not multifunc_ordered: ret['success'][data['fun'][ind]] = False try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True elif minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. 
Only ' 'saltutil.refresh_pillar allowed in blackout mode.') func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) minion_instance.functions.pack['__context__']['retcode'] = 0 key = ind if multifunc_ordered else data['fun'][ind] ret['return'][key] = func(*args, **kwargs) retcode = minion_instance.functions.pack['__context__'].get( 'retcode', 0 ) if retcode == 0: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. try: func_result = all(ret['return'][key].get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = 1 ret['retcode'][key] = retcode ret['success'][key] = retcode == 0 except Exception as exc: trb = traceback.format_exc() log.warning('The minion function caused an exception: %s', exc) if multifunc_ordered: ret['return'][ind] = trb else: ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job %s: %s', data['jid'], exc ) def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file 
is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled if ret['jid'] == 'req': ret['jid'] = salt.utils.jid.gen_jid(self.opts) salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) if not self.opts['pub_ret']: return '' def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. 
If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' if not isinstance(rets, list): rets = [rets] jids = {} for ret in rets: jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) load = jids.setdefault(jid, {}) if ret_cmd == '_syndic_return': if not load: load.update({'id': self.opts['id'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__'), 'return': {}}) if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load.update({'id': self.opts['id']}) for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. 
This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled salt.utils.minion.cache_jobs(self.opts, load['jid'], ret) load = {'cmd': ret_cmd, 'load': list(six.itervalues(jids))} def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' 
) else: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data) def _refresh_grains_watcher(self, refresh_interval_in_minutes): ''' Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: :return: None ''' if '__update_grains' not in self.opts.get('schedule', {}): if 'schedule' not in self.opts: self.opts['schedule'] = {} self.opts['schedule'].update({ '__update_grains': { 'function': 'event.fire', 'args': [{}, 'grains_refresh'], 'minutes': refresh_interval_in_minutes } }) def _fire_master_minion_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to False in Sodium release. self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'minion_start' ) # send name spaced event self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'minion'), ) def module_refresh(self, force_refresh=False, notify=False): ''' Refresh the functions and returners. ''' log.debug('Refreshing modules. Notify=%s', notify) self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify) self.schedule.functions = self.functions self.schedule.returners = self.returners def beacons_refresh(self): ''' Refresh the functions and returners. 
''' log.debug('Refreshing beacons.') self.beacons = salt.beacons.Beacon(self.opts, self.functions) def matchers_refresh(self): ''' Refresh the matchers ''' log.debug('Refreshing matchers.') self.matchers = salt.loader.matchers(self.opts) # TODO: only allow one future in flight at a time? @tornado.gen.coroutine def pillar_refresh(self, force_refresh=False, notify=False): ''' Refresh the pillar ''' if self.connected: log.debug('Refreshing pillar. Notify: %s', notify) async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ) try: self.opts['pillar'] = yield async_pillar.compile_pillar() if notify: evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE) except SaltClientError: # Do not exit if a pillar refresh fails. log.error('Pillar data could not be refreshed. ' 'One or more masters may be down!') finally: async_pillar.destroy() self.module_refresh(force_refresh, notify) self.matchers_refresh() self.beacons_refresh() def manage_schedule(self, tag, data): ''' Refresh the functions and returners. 
''' func = data.get('func', None) name = data.get('name', None) schedule = data.get('schedule', None) where = data.get('where', None) persist = data.get('persist', None) funcs = {'delete': ('delete_job', (name, persist)), 'add': ('add_job', (schedule, persist)), 'modify': ('modify_job', (name, schedule, persist)), 'enable': ('enable_schedule', ()), 'disable': ('disable_schedule', ()), 'enable_job': ('enable_job', (name, persist)), 'disable_job': ('disable_job', (name, persist)), 'postpone_job': ('postpone_job', (name, data)), 'skip_job': ('skip_job', (name, data)), 'reload': ('reload', (schedule,)), 'list': ('list', (where,)), 'save_schedule': ('save_schedule', ()), 'get_next_fire_time': ('get_next_fire_time', (name,))} # Call the appropriate schedule function try: alias, params = funcs.get(func) getattr(self.schedule, alias)(*params) except TypeError: log.error('Function "%s" is unavailable in salt.utils.scheduler', func) def manage_beacons(self, tag, data): ''' Manage Beacons ''' func = data.get('func', None) name = data.get('name', None) beacon_data = data.get('beacon_data', None) include_pillar = data.get('include_pillar', None) include_opts = data.get('include_opts', None) funcs = {'add': ('add_beacon', (name, beacon_data)), 'modify': ('modify_beacon', (name, beacon_data)), 'delete': ('delete_beacon', (name,)), 'enable': ('enable_beacons', ()), 'disable': ('disable_beacons', ()), 'enable_beacon': ('enable_beacon', (name,)), 'disable_beacon': ('disable_beacon', (name,)), 'list': ('list_beacons', (include_opts, include_pillar)), 'list_available': ('list_available_beacons', ()), 'validate_beacon': ('validate_beacon', (name, beacon_data)), 'reset': ('reset', ())} # Call the appropriate beacon function try: alias, params = funcs.get(func) getattr(self.beacons, alias)(*params) except AttributeError: log.error('Function "%s" is unavailable in salt.beacons', func) except TypeError as exc: log.info( 'Failed to handle %s with data(%s). 
Error: %s', tag, data, exc, exc_info_on_loglevel=logging.DEBUG ) def environ_setenv(self, tag, data): ''' Set the salt-minion main process environment according to the data contained in the minion event data ''' environ = data.get('environ', None) if environ is None: return False false_unsets = data.get('false_unsets', False) clear_all = data.get('clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all) def _pre_tune(self): ''' Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running ''' if self._running is None: self._running = True elif self._running is False: log.error( 'This %s was scheduled to stop. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return elif self._running is True: log.error( 'This %s is already running. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return try: log.info( '%s is starting as user \'%s\'', self.__class__.__name__, salt.utils.user.get_user() ) except Exception as err: # Only windows is allowed to fail here. See #3189. Log as debug in # that case. Else, error. 
log.log( salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR, 'Failed to get the user who is starting %s', self.__class__.__name__, exc_info=err ) def _mine_send(self, tag, data): ''' Send mine data to the master ''' channel = salt.transport.client.ReqChannel.factory(self.opts) data['tok'] = self.tok try: ret = channel.send(data) return ret except SaltReqTimeoutError: log.warning('Unable to send mine data to master.') return None finally: channel.close() def _handle_tag_module_refresh(self, tag, data): ''' Handle a module_refresh event ''' self.module_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) @tornado.gen.coroutine def _handle_tag_pillar_refresh(self, tag, data): ''' Handle a pillar_refresh event ''' yield self.pillar_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) def _handle_tag_beacons_refresh(self, tag, data): ''' Handle a beacon_refresh event ''' self.beacons_refresh() def _handle_tag_matchers_refresh(self, tag, data): ''' Handle a matchers_refresh event ''' self.matchers_refresh() def _handle_tag_manage_schedule(self, tag, data): ''' Handle a manage_schedule event ''' self.manage_schedule(tag, data) def _handle_tag_manage_beacons(self, tag, data): ''' Handle a manage_beacons event ''' self.manage_beacons(tag, data) def _handle_tag_grains_refresh(self, tag, data): ''' Handle a grains_refresh event ''' if (data.get('force_refresh', False) or self.grains_cache != self.opts['grains']): self.pillar_refresh(force_refresh=True) self.grains_cache = self.opts['grains'] def _handle_tag_environ_setenv(self, tag, data): ''' Handle a environ_setenv event ''' self.environ_setenv(tag, data) def _handle_tag_minion_mine(self, tag, data): ''' Handle a _minion_mine event ''' self._mine_send(tag, data) def _handle_tag_fire_master(self, tag, data): ''' Handle a fire_master event ''' if self.connected: log.debug('Forwarding master event tag=%s', data['tag']) 
self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) def _handle_tag_master_disconnected_failback(self, tag, data): ''' Handle a master_disconnected_failback event ''' # if the master disconnect event is for a different master, raise an exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 
'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = 
True self.io_loop.stop() def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) def _handle_tag_salt_auth_creds(self, tag, data): ''' Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def handle_event(self, package): 
''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. ''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion._post_master_init
python
def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True)
Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L1250-L1346
null
class Minion(MinionBase):
    '''
    This class instantiates a minion, runs connections for a minion, and
    loads all of the functions into the minion.
    '''
    def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None):  # pylint: disable=W0231
        '''
        Pass in the options dict
        '''
        # this means that the parent class doesn't know *which* master we connect to
        super(Minion, self).__init__(opts)
        self.timeout = timeout
        self.safe = safe

        self._running = None
        self.win_proc = []
        self.loaded_base_name = loaded_base_name
        self.connected = False
        self.restart = False
        # Flag meaning minion has finished initialization including first connect to the master.
        # True means the Minion is fully functional and ready to handle events.
        self.ready = False
        self.jid_queue = [] if jid_queue is None else jid_queue
        self.periodic_callbacks = {}

        if io_loop is None:
            install_zmq()
            self.io_loop = ZMQDefaultLoop.current()
        else:
            self.io_loop = io_loop

        # Warn if ZMQ < 3.2
        if zmq:
            if ZMQ_VERSION_INFO < (3, 2):
                log.warning(
                    'You have a version of ZMQ less than ZMQ 3.2! There are '
                    'known connection keep-alive issues with ZMQ < 3.2 which '
                    'may result in loss of contact with minions. Please '
                    'upgrade your ZMQ!'
                )
        # Late setup of the opts grains, so we can log from the grains
        # module. If this is a proxy, however, we need to init the proxymodule
        # before we can get the grains. We do this for proxies in the
        # post_master_init
        if not salt.utils.platform.is_proxy():
            self.opts['grains'] = salt.loader.grains(opts)
        else:
            if self.opts.get('beacons_before_connect', False):
                log.warning(
                    '\'beacons_before_connect\' is not supported '
                    'for proxy minions. Setting to False'
                )
                self.opts['beacons_before_connect'] = False
            if self.opts.get('scheduler_before_connect', False):
                log.warning(
                    '\'scheduler_before_connect\' is not supported '
                    'for proxy minions. Setting to False'
                )
                self.opts['scheduler_before_connect'] = False

        log.info('Creating minion process manager')

        # Optional randomized startup delay (thundering-herd mitigation)
        if self.opts['random_startup_delay']:
            sleep_time = random.randint(0, self.opts['random_startup_delay'])
            log.info(
                'Minion sleeping for %s seconds due to configured '
                'startup_delay between 0 and %s seconds',
                sleep_time, self.opts['random_startup_delay']
            )
            time.sleep(sleep_time)

        self.process_manager = ProcessManager(name='MinionProcessManager')
        self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True})
        # We don't have the proxy setup yet, so we can't start engines
        # Engines need to be able to access __proxy__
        if not salt.utils.platform.is_proxy():
            self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
                                        self.process_manager)

        # Install the SIGINT/SIGTERM handlers if not done so far
        if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
            # No custom signal handling was added, install our own
            signal.signal(signal.SIGINT, self._handle_signals)

        if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
            # No custom signal handling was added, install our own
            signal.signal(signal.SIGTERM, self._handle_signals)

    def _handle_signals(self, signum, sigframe):  # pylint: disable=unused-argument
        # Stop the minion and shut down all child processes on SIGINT/SIGTERM.
        self._running = False
        # escalate the signals to the process manager
        self.process_manager.stop_restarting()
        self.process_manager.send_signal_to_processes(signum)
        # kill any remaining processes
        self.process_manager.kill_children()
        time.sleep(1)
        sys.exit(0)

    def sync_connect_master(self, timeout=None, failed=False):
        '''
        Block until we are connected to a master
        '''
        self._sync_connect_master_success = False
        log.debug("sync_connect_master")

        def on_connect_master_future_done(future):
            self._sync_connect_master_success = True
            self.io_loop.stop()

        self._connect_master_future = self.connect_master(failed=failed)
        # finish connecting to master
        self._connect_master_future.add_done_callback(on_connect_master_future_done)
        if timeout:
            self.io_loop.call_later(timeout,
                                    self.io_loop.stop)
        try:
            self.io_loop.start()
        except KeyboardInterrupt:
            self.destroy()
        # I made the following 3 line oddity to preserve traceback.
        # Please read PR #23978 before changing, hopefully avoiding regressions.
        # Good luck, we're all counting on you.  Thanks.
        if self._connect_master_future.done():
            future_exception = self._connect_master_future.exception()
            if future_exception:
                # This needs to be re-raised to preserve restart_on_error behavior.
                raise six.reraise(*future_exception)
        if timeout and self._sync_connect_master_success is False:
            raise SaltDaemonNotRunning('Failed to connect to the salt-master')

    @tornado.gen.coroutine
    def connect_master(self, failed=False):
        '''
        Return a future which will complete when you are connected to a master
        '''
        master, self.pub_channel = yield self.eval_master(self.opts,
                                                          self.timeout,
                                                          self.safe,
                                                          failed)
        yield self._post_master_init(master)

    # TODO: better name...
    @tornado.gen.coroutine
    def _prep_mod_opts(self):
        '''
        Returns a copy of the opts with key bits stripped out
        '''
        mod_opts = {}
        for key, val in six.iteritems(self.opts):
            if key == 'logger':
                continue
            mod_opts[key] = val
        return mod_opts

    def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None):
        '''
        Return the functions and the returners loaded up from the loader
        module
        '''
        opt_in = True
        if not opts:
            opts = self.opts
            opt_in = False
        # if this is a *nix system AND modules_max_memory is set, lets enforce
        # a memory limit on module imports
        # this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
        modules_max_memory = False
        if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
            log.debug(
                'modules_max_memory set, enforcing a maximum of %s',
                opts['modules_max_memory']
            )
            modules_max_memory = True
            old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
            rss, vms = psutil.Process(os.getpid()).memory_info()[:2]
            mem_limit = rss + vms + opts['modules_max_memory']
            resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
        elif opts.get('modules_max_memory', -1) > 0:
            if not HAS_PSUTIL:
                log.error('Unable to enforce modules_max_memory because psutil is missing')
            if not HAS_RESOURCE:
                log.error('Unable to enforce modules_max_memory because resource is missing')

        # This might be a proxy minion
        if hasattr(self, 'proxy'):
            proxy = self.proxy
        else:
            proxy = None

        if grains is None:
            opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy)
        self.utils = salt.loader.utils(opts, proxy=proxy)

        if opts.get('multimaster', False):
            s_opts = copy.deepcopy(opts)
            functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy,
                                                loaded_base_name=self.loaded_base_name, notify=notify)
        else:
            functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy)
        returners = salt.loader.returners(opts, functions, proxy=proxy)
        errors = {}
        if '_errors' in functions:
            errors = functions['_errors']
            functions.pop('_errors')

        # we're done, reset the limits!
        if modules_max_memory is True:
            resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)

        executors = salt.loader.executors(opts, functions, proxy=proxy)

        if opt_in:
            self.opts = opts

        return functions, returners, errors, executors

    def _send_req_sync(self, load, timeout):
        # Synchronously send a request payload to the master, optionally
        # signing it first when minion_sign_messages is enabled.
        if self.opts['minion_sign_messages']:
            log.trace('Signing event to be published onto the bus.')
            minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
            sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
            load['sig'] = sig

        channel = salt.transport.client.ReqChannel.factory(self.opts)
        try:
            return channel.send(load, timeout=timeout)
        finally:
            channel.close()

    @tornado.gen.coroutine
    def _send_req_async(self, load, timeout):
        # Asynchronous counterpart of _send_req_sync.
        if self.opts['minion_sign_messages']:
            log.trace('Signing event to be published onto the bus.')
            minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
            sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
            load['sig'] = sig

        channel = salt.transport.client.AsyncReqChannel.factory(self.opts)
        try:
            ret = yield channel.send(load, timeout=timeout)
            raise tornado.gen.Return(ret)
        finally:
            channel.close()

    def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None):
        '''
        Fire an event on the master, or drop message if unable to send.
        '''
        load = {'id': self.opts['id'],
                'cmd': '_minion_event',
                'pretag': pretag,
                'tok': self.tok}
        if events:
            load['events'] = events
        elif data and tag:
            load['data'] = data
            load['tag'] = tag
        elif not data and tag:
            load['data'] = {}
            load['tag'] = tag
        else:
            # nothing to send
            return

        if sync:
            try:
                self._send_req_sync(load, timeout)
            except salt.exceptions.SaltReqTimeoutError:
                log.info('fire_master failed: master could not be contacted. Request timed out.')
                # very likely one of the masters is dead, status.master will flush it
                self.functions['status.master'](self.opts['master'])
                return False
            except Exception:
                log.info('fire_master failed: %s', traceback.format_exc())
                return False
        else:
            if timeout_handler is None:
                def handle_timeout(*_):
                    log.info('fire_master failed: master could not be contacted. Request timed out.')
                    # very likely one of the masters is dead, status.master will flush it
                    self.functions['status.master'](self.opts['master'])
                    return True
                timeout_handler = handle_timeout

            with tornado.stack_context.ExceptionStackContext(timeout_handler):
                self._send_req_async(load, timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg
        return True

    @tornado.gen.coroutine
    def _handle_decoded_payload(self, data):
        '''
        Override this method if you wish to handle the decoded data
        differently.
        '''
        # Ensure payload is unicode. Disregard failure to decode binary blobs.
if six.PY2: data = salt.utils.data.decode(data, keep=True) if 'user' in data: log.info( 'User %s Executing command %s with jid %s', data['user'], data['fun'], data['jid'] ) else: log.info( 'Executing command %s with jid %s', data['fun'], data['jid'] ) log.debug('Command details %s', data) # Don't duplicate jobs log.trace('Started JIDs: %s', self.jid_queue) if self.jid_queue is not None: if data['jid'] in self.jid_queue: return else: self.jid_queue.append(data['jid']) if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']: self.jid_queue.pop(0) if isinstance(data['fun'], six.string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners process_count_max = self.opts.get('process_count_max') process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs') if process_count_max > 0: process_count = len(salt.utils.minion.running(self.opts)) while process_count >= process_count_max: log.warning('Maximum number of processes (%s) reached while ' 'executing jid %s, waiting %s seconds...', process_count_max, data['jid'], process_count_max_sleep_secs) yield tornado.gen.sleep(process_count_max_sleep_secs) process_count = len(salt.utils.minion.running(self.opts)) # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. 
        instance = self
        multiprocessing_enabled = self.opts.get('multiprocessing', True)
        if multiprocessing_enabled:
            if sys.platform.startswith('win'):
                # let python reconstruct the minion on the other side if we're
                # running on windows
                instance = None
            with default_signals(signal.SIGINT, signal.SIGTERM):
                process = SignalHandlingMultiprocessingProcess(
                    target=self._target, args=(instance, self.opts, data, self.connected)
                )
        else:
            process = threading.Thread(
                target=self._target,
                args=(instance, self.opts, data, self.connected),
                name=data['jid']
            )

        if multiprocessing_enabled:
            with default_signals(signal.SIGINT, signal.SIGTERM):
                # Reset current signals before starting the process in
                # order not to inherit the current signal handlers
                process.start()
        else:
            process.start()

        # TODO: remove the windows specific check?
        if multiprocessing_enabled and not salt.utils.platform.is_windows():
            # we only want to join() immediately if we are daemonizing a process
            process.join()
        elif salt.utils.platform.is_windows():
            self.win_proc.append(process)

    def ctx(self):
        '''
        Return a single context manager for the minion's data
        '''
        if six.PY2:
            return contextlib.nested(
                self.functions.context_dict.clone(),
                self.returners.context_dict.clone(),
                self.executors.context_dict.clone(),
            )
        else:
            exitstack = contextlib.ExitStack()
            exitstack.enter_context(self.functions.context_dict.clone())
            exitstack.enter_context(self.returners.context_dict.clone())
            exitstack.enter_context(self.executors.context_dict.clone())
            return exitstack

    @classmethod
    def _target(cls, minion_instance, opts, data, connected):
        # Entry point run in the child process/thread for a single job.
        # minion_instance is None on Windows (rebuilt here, see above).
        if not minion_instance:
            minion_instance = cls(opts)
            minion_instance.connected = connected
            if not hasattr(minion_instance, 'functions'):
                functions, returners, function_errors, executors = (
                    minion_instance._load_modules(grains=opts['grains'])
                )
                minion_instance.functions = functions
                minion_instance.returners = returners
                minion_instance.function_errors = function_errors
                minion_instance.executors = executors
            if not hasattr(minion_instance, 'serial'):
                minion_instance.serial = salt.payload.Serial(opts)
            if not hasattr(minion_instance, 'proc_dir'):
                uid = salt.utils.user.get_uid(user=opts.get('user', None))
                minion_instance.proc_dir = (
                    get_proc_dir(opts['cachedir'], uid=uid)
                )

        def run_func(minion_instance, opts, data):
            # multi-function jobs take a list/tuple of funs
            if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
                return Minion._thread_multi_return(minion_instance, opts, data)
            else:
                return Minion._thread_return(minion_instance, opts, data)

        with tornado.stack_context.StackContext(functools.partial(RequestContext,
                                                                  {'data': data, 'opts': opts})):
            with tornado.stack_context.StackContext(minion_instance.ctx):
                run_func(minion_instance, opts, data)

    @classmethod
    def _thread_return(cls, minion_instance, opts, data):
        '''
        This method should be used as a threading target, start the actual
        minion side execution.
        '''
        fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
        if opts['multiprocessing'] and not salt.utils.platform.is_windows():
            # Shutdown the multiprocessing before daemonizing
            salt.log.setup.shutdown_multiprocessing_logging()

            salt.utils.process.daemonize_if(opts)

            # Reconfigure multiprocessing logging after daemonizing
            salt.log.setup.setup_multiprocessing_logging()

        salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid']))

        sdata = {'pid': os.getpid()}
        sdata.update(data)
        log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid'])
        # Record the running job in the proc dir so it can be listed/killed
        with salt.utils.files.fopen(fn_, 'w+b') as fp_:
            fp_.write(minion_instance.serial.dumps(sdata))
        ret = {'success': False}
        function_name = data['fun']
        executors = data.get('module_executors') or \
            getattr(minion_instance, 'module_executors', []) or \
            opts.get('module_executors', ['direct_call'])
        allow_missing_funcs = any([
            minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name)
            for executor in executors
            if '{0}.allow_missing_func' in minion_instance.executors
        ])
        if function_name in minion_instance.functions or allow_missing_funcs is True:
            try:
                minion_blackout_violation = False
                if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
                    whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
                    # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                    if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
                        minion_blackout_violation = True
                # use minion_blackout_whitelist from grains if it exists
                if minion_instance.opts['grains'].get('minion_blackout', False):
                    whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
                    if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
                        minion_blackout_violation = True
                if minion_blackout_violation:
                    raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                              'to False in pillar or grains to resume operations. Only '
                                              'saltutil.refresh_pillar allowed in blackout mode.')

                if function_name in minion_instance.functions:
                    func = minion_instance.functions[function_name]
                    args, kwargs = load_args_and_kwargs(
                        func,
                        data['arg'],
                        data)
                else:
                    # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True
                    func = function_name
                    args, kwargs = data['arg'], data
                minion_instance.functions.pack['__context__']['retcode'] = 0
                if isinstance(executors, six.string_types):
                    executors = [executors]
                elif not isinstance(executors, list) or not executors:
                    raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected".
                                              format(executors))
                if opts.get('sudo_user', '') and executors[-1] != 'sudo':
                    executors[-1] = 'sudo'  # replace the last one with sudo
                log.trace('Executors list %s', executors)  # pylint: disable=no-member

                # Run through the executor chain until one returns a result
                for name in executors:
                    fname = '{0}.execute'.format(name)
                    if fname not in minion_instance.executors:
                        raise SaltInvocationError("Executor '{0}' is not available".format(name))
                    return_data = minion_instance.executors[fname](opts, data, func, args, kwargs)
                    if return_data is not None:
                        break

                if isinstance(return_data, types.GeneratorType):
                    # stream progress events for generator returns
                    ind = 0
                    iret = {}
                    for single in return_data:
                        if isinstance(single, dict) and isinstance(iret, dict):
                            iret.update(single)
                        else:
                            if not iret:
                                iret = []
                            iret.append(single)
                        tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job')
                        event_data = {'return': single}
                        minion_instance._fire_master(event_data, tag)
                        ind += 1
                    ret['return'] = iret
                else:
                    ret['return'] = return_data

                retcode = minion_instance.functions.pack['__context__'].get(
                    'retcode',
                    salt.defaults.exitcodes.EX_OK
                )
                if retcode == salt.defaults.exitcodes.EX_OK:
                    # No nonzero retcode in __context__ dunder. Check if return
                    # is a dictionary with a "result" or "success" key.
                    try:
                        func_result = all(return_data.get(x, True)
                                          for x in ('result', 'success'))
                    except Exception:
                        # return data is not a dict
                        func_result = True
                    if not func_result:
                        retcode = salt.defaults.exitcodes.EX_GENERIC

                ret['retcode'] = retcode
                ret['success'] = retcode == salt.defaults.exitcodes.EX_OK
            except CommandNotFoundError as exc:
                msg = 'Command required for \'{0}\' not found'.format(
                    function_name
                )
                log.debug(msg, exc_info=True)
                ret['return'] = '{0}: {1}'.format(msg, exc)
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            except CommandExecutionError as exc:
                log.error(
                    'A command in \'%s\' had a problem: %s',
                    function_name, exc,
                    exc_info_on_loglevel=logging.DEBUG
                )
                ret['return'] = 'ERROR: {0}'.format(exc)
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            except SaltInvocationError as exc:
                log.error(
                    'Problem executing \'%s\': %s',
                    function_name, exc,
                    exc_info_on_loglevel=logging.DEBUG
                )
                ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
                    function_name, exc
                )
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            except TypeError as exc:
                # bad args: include the function's docstring to help the caller
                msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(
                    function_name, exc, func.__doc__ or ''
                )
                log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
                ret['return'] = msg
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            except Exception:
                msg = 'The minion function caused an exception'
                log.warning(msg, exc_info_on_loglevel=True)
                salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
                ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        else:
            # function not loaded: report why, using sys.doc / loader errors
            docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name))
            if docs:
                docs[function_name] = minion_instance.functions.missing_fun_string(function_name)
                ret['return'] = docs
            else:
                ret['return'] = minion_instance.functions.missing_fun_string(function_name)
                mod_name = function_name.split('.')[0]
                if mod_name in minion_instance.function_errors:
                    ret['return'] += ' Possible reasons: \'{0}\''.format(
                        minion_instance.function_errors[mod_name]
                    )
            ret['success'] = False
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            ret['out'] = 'nested'

        ret['jid'] = data['jid']
        ret['fun'] = data['fun']
        ret['fun_args'] = data['arg']
        if 'master_id' in data:
            ret['master_id'] = data['master_id']
        if 'metadata' in data:
            if isinstance(data['metadata'], dict):
                ret['metadata'] = data['metadata']
            else:
                log.warning('The metadata parameter must be a dictionary. Ignoring.')
        if minion_instance.connected:
            minion_instance._return_pub(
                ret,
                timeout=minion_instance._return_retry_timer()
            )

        # Add default returners from minion config
        # Should have been converted to comma-delimited string already
        if isinstance(opts.get('return'), six.string_types):
            if data['ret']:
                data['ret'] = ','.join((data['ret'], opts['return']))
            else:
                data['ret'] = opts['return']

        log.debug('minion return: %s', ret)
        # TODO: make a list? Seems odd to split it this late :/
        if data['ret'] and isinstance(data['ret'], six.string_types):
            if 'ret_config' in data:
                ret['ret_config'] = data['ret_config']
            if 'ret_kwargs' in data:
                ret['ret_kwargs'] = data['ret_kwargs']
            ret['id'] = opts['id']
            for returner in set(data['ret'].split(',')):
                try:
                    returner_str = '{0}.returner'.format(returner)
                    if returner_str in minion_instance.returners:
                        minion_instance.returners[returner_str](ret)
                    else:
                        returner_err = minion_instance.returners.missing_fun_string(returner_str)
                        log.error(
                            'Returner %s could not be loaded: %s',
                            returner_str, returner_err
                        )
                except Exception as exc:
                    log.exception(
                        'The return failed for job %s: %s', data['jid'], exc
                    )

    @classmethod
    def _thread_multi_return(cls, minion_instance, opts, data):
        '''
        This method should be used as a threading target, start the actual
        minion side execution.
        '''
        fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
        if opts['multiprocessing'] and not salt.utils.platform.is_windows():
            # Shutdown the multiprocessing before daemonizing
            salt.log.setup.shutdown_multiprocessing_logging()

            salt.utils.process.daemonize_if(opts)

            # Reconfigure multiprocessing logging after daemonizing
            salt.log.setup.setup_multiprocessing_logging()

        salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))

        sdata = {'pid': os.getpid()}
        sdata.update(data)
        log.info('Starting a new job with PID %s', sdata['pid'])
        with salt.utils.files.fopen(fn_, 'w+b') as fp_:
            fp_.write(minion_instance.serial.dumps(sdata))

        multifunc_ordered = opts.get('multifunc_ordered', False)
        num_funcs = len(data['fun'])
        # ordered mode keys results by index; unordered mode keys by fun name
        if multifunc_ordered:
            ret = {
                'return': [None] * num_funcs,
                'retcode': [None] * num_funcs,
                'success': [False] * num_funcs
            }
        else:
            ret = {
                'return': {},
                'retcode': {},
                'success': {}
            }

        for ind in range(0, num_funcs):
            if not multifunc_ordered:
                ret['success'][data['fun'][ind]] = False
            try:
                minion_blackout_violation = False
                if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
                    whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
                    # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                    if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
                        minion_blackout_violation = True
                elif minion_instance.opts['grains'].get('minion_blackout', False):
                    whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
                    if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
                        minion_blackout_violation = True
                if minion_blackout_violation:
                    raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                              'to False in pillar or grains to resume operations. Only '
                                              'saltutil.refresh_pillar allowed in blackout mode.')

                func = minion_instance.functions[data['fun'][ind]]

                args, kwargs = load_args_and_kwargs(
                    func,
                    data['arg'][ind],
                    data)
                minion_instance.functions.pack['__context__']['retcode'] = 0
                key = ind if multifunc_ordered else data['fun'][ind]
                ret['return'][key] = func(*args, **kwargs)
                retcode = minion_instance.functions.pack['__context__'].get(
                    'retcode',
                    0
                )
                if retcode == 0:
                    # No nonzero retcode in __context__ dunder. Check if return
                    # is a dictionary with a "result" or "success" key.
                    try:
                        func_result = all(ret['return'][key].get(x, True)
                                          for x in ('result', 'success'))
                    except Exception:
                        # return data is not a dict
                        func_result = True
                    if not func_result:
                        retcode = 1

                ret['retcode'][key] = retcode
                ret['success'][key] = retcode == 0
            except Exception as exc:
                trb = traceback.format_exc()
                log.warning('The minion function caused an exception: %s', exc)
                if multifunc_ordered:
                    ret['return'][ind] = trb
                else:
                    ret['return'][data['fun'][ind]] = trb
        ret['jid'] = data['jid']
        ret['fun'] = data['fun']
        ret['fun_args'] = data['arg']
        if 'metadata' in data:
            ret['metadata'] = data['metadata']
        if minion_instance.connected:
            minion_instance._return_pub(
                ret,
                timeout=minion_instance._return_retry_timer()
            )
        if data['ret']:
            if 'ret_config' in data:
                ret['ret_config'] = data['ret_config']
            if 'ret_kwargs' in data:
                ret['ret_kwargs'] = data['ret_kwargs']
            for returner in set(data['ret'].split(',')):
                ret['id'] = opts['id']
                try:
                    minion_instance.returners['{0}.returner'.format(
                        returner
                    )](ret)
                except Exception as exc:
                    log.error(
                        'The return failed for job %s: %s',
                        data['jid'], exc
                    )

    def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True):
        '''
        Return the data from the executed command to the master server
        '''
        jid = ret.get('jid', ret.get('__jid__'))
        fun = ret.get('fun', ret.get('__fun__'))
        if self.opts['multiprocessing']:
            # clean up the proc-dir entry for this finished job
            fn_ = os.path.join(self.proc_dir, jid)
            if os.path.isfile(fn_):
                try:
                    os.remove(fn_)
                except (OSError, IOError):
                    # The file is gone already
                    pass
        log.info('Returning information for job: %s', jid)
        log.trace('Return data: %s', ret)
        if ret_cmd == '_syndic_return':
            load = {'cmd': ret_cmd,
                    'id': self.opts['uid'],
                    'jid': jid,
                    'fun': fun,
                    'arg': ret.get('arg'),
                    'tgt': ret.get('tgt'),
                    'tgt_type': ret.get('tgt_type'),
                    'load': ret.get('__load__')}
            if '__master_id__' in ret:
                load['master_id'] = ret['__master_id__']
            load['return'] = {}
            for key, value in six.iteritems(ret):
                if key.startswith('__'):
                    continue
                load['return'][key] = value
        else:
            load = {'cmd': ret_cmd,
                    'id': self.opts['id']}
            for key, value in six.iteritems(ret):
                load[key] = value

        if 'out' in ret:
            if isinstance(ret['out'], six.string_types):
                load['out'] = ret['out']
            else:
                log.error(
                    'Invalid outputter %s. This is likely a bug.',
                    ret['out']
                )
        else:
            try:
                oput = self.functions[fun].__outputter__
            except (KeyError, AttributeError, TypeError):
                pass
            else:
                if isinstance(oput, six.string_types):
                    load['out'] = oput
        if self.opts['cache_jobs']:
            # Local job cache has been enabled
            if ret['jid'] == 'req':
                ret['jid'] = salt.utils.jid.gen_jid(self.opts)
            salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret)

        if not self.opts['pub_ret']:
            return ''

        def timeout_handler(*_):
            log.warning(
                'The minion failed to return the job information for job %s. '
                'This is often due to the master being shut down or '
                'overloaded. If the master is running, consider increasing '
                'the worker_threads value.', jid
            )
            return True

        if sync:
            try:
                ret_val = self._send_req_sync(load, timeout=timeout)
            except SaltReqTimeoutError:
                timeout_handler()
                return ''
        else:
            with tornado.stack_context.ExceptionStackContext(timeout_handler):
                ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg

        log.trace('ret_val = %s', ret_val)  # pylint: disable=no-member
        return ret_val

    def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True):
        '''
        Return the data from the executed command to the master server
        '''
        if not isinstance(rets, list):
            rets = [rets]
        jids = {}
        for ret in rets:
            jid = ret.get('jid', ret.get('__jid__'))
            fun = ret.get('fun', ret.get('__fun__'))
            if self.opts['multiprocessing']:
                fn_ = os.path.join(self.proc_dir, jid)
                if os.path.isfile(fn_):
                    try:
                        os.remove(fn_)
                    except (OSError, IOError):
                        # The file is gone already
                        pass
            log.info('Returning information for job: %s', jid)
            load = jids.setdefault(jid, {})
            if ret_cmd == '_syndic_return':
                if not load:
                    load.update({'id': self.opts['id'],
                                 'jid': jid,
                                 'fun': fun,
                                 'arg': ret.get('arg'),
                                 'tgt': ret.get('tgt'),
                                 'tgt_type': ret.get('tgt_type'),
                                 'load': ret.get('__load__'),
                                 'return': {}})
                if '__master_id__' in ret:
                    load['master_id'] = ret['__master_id__']
                for key, value in six.iteritems(ret):
                    if key.startswith('__'):
                        continue
                    load['return'][key] = value
            else:
                load.update({'id': self.opts['id']})
                for key, value in six.iteritems(ret):
                    load[key] = value
            if 'out' in ret:
                if isinstance(ret['out'], six.string_types):
                    load['out'] = ret['out']
                else:
                    log.error(
                        'Invalid outputter %s. This is likely a bug.',
                        ret['out']
                    )
            else:
                try:
                    oput = self.functions[fun].__outputter__
                except (KeyError, AttributeError, TypeError):
                    pass
                else:
                    if isinstance(oput, six.string_types):
                        load['out'] = oput
            if self.opts['cache_jobs']:
                # Local job cache has been enabled
                salt.utils.minion.cache_jobs(self.opts, load['jid'], ret)
        load = {'cmd': ret_cmd,
                'load': list(six.itervalues(jids))}

        def timeout_handler(*_):
            log.warning(
                'The minion failed to return the job information for job %s. '
                'This is often due to the master being shut down or '
                'overloaded. If the master is running, consider increasing '
                'the worker_threads value.', jid
            )
            return True

        if sync:
            try:
                ret_val = self._send_req_sync(load, timeout=timeout)
            except SaltReqTimeoutError:
                timeout_handler()
                return ''
        else:
            with tornado.stack_context.ExceptionStackContext(timeout_handler):
                ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg

        log.trace('ret_val = %s', ret_val)  # pylint: disable=no-member
        return ret_val

    def _state_run(self):
        '''
        Execute a state run based on information set in the minion config
        file
        '''
        if self.opts['startup_states']:
            if self.opts.get('master_type', 'str') == 'disable' and \
                    self.opts.get('file_client', 'remote') == 'remote':
                log.warning(
                    'Cannot run startup_states when \'master_type\' is set '
                    'to \'disable\' and \'file_client\' is set to '
                    '\'remote\'. Skipping.'
)
        else:
            # Build a synthetic job (jid 'req') so the startup state run goes
            # through the normal job-handling path; optionally route its
            # return through the configured ext_job_cache returner.
            data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
            if self.opts['startup_states'] == 'sls':
                data['fun'] = 'state.sls'
                data['arg'] = [self.opts['sls_list']]
            elif self.opts['startup_states'] == 'top':
                data['fun'] = 'state.top'
                data['arg'] = [self.opts['top_file']]
            else:
                data['fun'] = 'state.highstate'
                data['arg'] = []
            self._handle_decoded_payload(data)

    def _refresh_grains_watcher(self, refresh_interval_in_minutes):
        '''
        Create a loop that will fire a pillar refresh to inform a master about
        a change in the grains of this minion

        :param refresh_interval_in_minutes: how often (in minutes) the
            scheduled job fires the ``grains_refresh`` event
        :return: None
        '''
        # Register the schedule entry only once; subsequent calls are no-ops.
        if '__update_grains' not in self.opts.get('schedule', {}):
            if 'schedule' not in self.opts:
                self.opts['schedule'] = {}
            self.opts['schedule'].update({
                '__update_grains': {
                    'function': 'event.fire',
                    'args': [{}, 'grains_refresh'],
                    'minutes': refresh_interval_in_minutes
                }
            })

    def _fire_master_minion_start(self):
        '''
        Fire the minion start event(s) on the master.
        '''
        # Send an event to the master that the minion is live
        if self.opts['enable_legacy_startup_events']:
            # Old style event. Defaults to False in Sodium release.
            self._fire_master(
                'Minion {0} started at {1}'.format(
                    self.opts['id'],
                    time.asctime()
                ),
                'minion_start'
            )
        # send name spaced event
        self._fire_master(
            'Minion {0} started at {1}'.format(
                self.opts['id'],
                time.asctime()
            ),
            tagify([self.opts['id'], 'start'], 'minion'),
        )

    def module_refresh(self, force_refresh=False, notify=False):
        '''
        Refresh the functions and returners.
        '''
        log.debug('Refreshing modules. Notify=%s', notify)
        self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify)
        # Keep the scheduler's view of the loaders in sync with ours.
        self.schedule.functions = self.functions
        self.schedule.returners = self.returners

    def beacons_refresh(self):
        '''
        Refresh the beacons.
        '''
        log.debug('Refreshing beacons.')
        self.beacons = salt.beacons.Beacon(self.opts, self.functions)

    def matchers_refresh(self):
        '''
        Refresh the matchers
        '''
        log.debug('Refreshing matchers.')
        self.matchers = salt.loader.matchers(self.opts)

    # TODO: only allow one future in flight at a time?
    @tornado.gen.coroutine
    def pillar_refresh(self, force_refresh=False, notify=False):
        '''
        Refresh the pillar

        :param bool force_refresh: passed through to the module reload
        :param bool notify: fire a MINION_PILLAR_COMPLETE event when done
        '''
        if self.connected:
            log.debug('Refreshing pillar. Notify: %s', notify)
            async_pillar = salt.pillar.get_async_pillar(
                self.opts,
                self.opts['grains'],
                self.opts['id'],
                self.opts['saltenv'],
                pillarenv=self.opts.get('pillarenv'),
            )
            try:
                self.opts['pillar'] = yield async_pillar.compile_pillar()
                if notify:
                    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
                    evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE)
            except SaltClientError:
                # Do not exit if a pillar refresh fails.
                log.error('Pillar data could not be refreshed. '
                          'One or more masters may be down!')
            finally:
                async_pillar.destroy()
        # Reload modules/matchers/beacons unconditionally so they pick up
        # whatever pillar data is currently available.
        self.module_refresh(force_refresh, notify)
        self.matchers_refresh()
        self.beacons_refresh()

    def manage_schedule(self, tag, data):
        '''
        Dispatch a schedule-management request (add/delete/modify/enable/...)
        from an event payload to the corresponding ``self.schedule`` method.
        '''
        func = data.get('func', None)
        name = data.get('name', None)
        schedule = data.get('schedule', None)
        where = data.get('where', None)
        persist = data.get('persist', None)

        # Map event 'func' names to (Schedule method name, args) pairs.
        funcs = {'delete': ('delete_job', (name, persist)),
                 'add': ('add_job', (schedule, persist)),
                 'modify': ('modify_job', (name, schedule, persist)),
                 'enable': ('enable_schedule', ()),
                 'disable': ('disable_schedule', ()),
                 'enable_job': ('enable_job', (name, persist)),
                 'disable_job': ('disable_job', (name, persist)),
                 'postpone_job': ('postpone_job', (name, data)),
                 'skip_job': ('skip_job', (name, data)),
                 'reload': ('reload', (schedule,)),
                 'list': ('list', (where,)),
                 'save_schedule': ('save_schedule', ()),
                 'get_next_fire_time': ('get_next_fire_time', (name,))}

        # Call the appropriate schedule function
        try:
            # An unknown 'func' makes funcs.get() return None, which raises
            # TypeError on unpacking and is reported below.
            alias, params = funcs.get(func)
            getattr(self.schedule, alias)(*params)
        except TypeError:
            log.error('Function "%s" is unavailable in salt.utils.scheduler', func)

    def manage_beacons(self, tag, data):
        '''
        Manage Beacons
        '''
        func = data.get('func', None)
        name = data.get('name', None)
        beacon_data = data.get('beacon_data', None)
        include_pillar = data.get('include_pillar', None)
        include_opts = data.get('include_opts', None)

        # Map event 'func' names to (Beacon method name, args) pairs.
        funcs = {'add': ('add_beacon', (name, beacon_data)),
                 'modify': ('modify_beacon', (name, beacon_data)),
                 'delete': ('delete_beacon', (name,)),
                 'enable': ('enable_beacons', ()),
                 'disable': ('disable_beacons', ()),
                 'enable_beacon': ('enable_beacon', (name,)),
                 'disable_beacon': ('disable_beacon', (name,)),
                 'list': ('list_beacons', (include_opts, include_pillar)),
                 'list_available': ('list_available_beacons', ()),
                 'validate_beacon': ('validate_beacon', (name, beacon_data)),
                 'reset': ('reset', ())}

        # Call the appropriate beacon function
        try:
            alias, params = funcs.get(func)
            getattr(self.beacons, alias)(*params)
        except AttributeError:
            log.error('Function "%s" is unavailable in salt.beacons', func)
        except TypeError as exc:
            log.info(
                'Failed to handle %s with data(%s). Error: %s',
                tag, data, exc,
                exc_info_on_loglevel=logging.DEBUG
            )

    def environ_setenv(self, tag, data):
        '''
        Set the salt-minion main process environment according to the data
        contained in the minion event data
        '''
        environ = data.get('environ', None)
        if environ is None:
            return False
        false_unsets = data.get('false_unsets', False)
        clear_all = data.get('clear_all', False)
        # Imported here rather than at module scope — presumably to avoid a
        # circular import at load time; confirm before moving to top of file.
        import salt.modules.environ as mod_environ
        return mod_environ.setenv(environ, false_unsets, clear_all)

    def _pre_tune(self):
        '''
        Set the minion running flag and issue the appropriate warnings if
        the minion cannot be started or is already running
        '''
        if self._running is None:
            self._running = True
        elif self._running is False:
            log.error(
                'This %s was scheduled to stop. Not running %s.tune_in()',
                self.__class__.__name__, self.__class__.__name__
            )
            return
        elif self._running is True:
            log.error(
                'This %s is already running. Not running %s.tune_in()',
                self.__class__.__name__, self.__class__.__name__
            )
            return

        try:
            log.info(
                '%s is starting as user \'%s\'',
                self.__class__.__name__, salt.utils.user.get_user()
            )
        except Exception as err:
            # Only windows is allowed to fail here. See #3189. Log as debug in
            # that case. Else, error.
log.log(
                salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR,
                'Failed to get the user who is starting %s',
                self.__class__.__name__,
                exc_info=err
            )

    def _mine_send(self, tag, data):
        '''
        Send mine data to the master

        Returns the channel response, or None on request timeout.
        '''
        channel = salt.transport.client.ReqChannel.factory(self.opts)
        data['tok'] = self.tok
        try:
            ret = channel.send(data)
            return ret
        except SaltReqTimeoutError:
            log.warning('Unable to send mine data to master.')
            return None
        finally:
            # Always release the transport channel, even on timeout.
            channel.close()

    def _handle_tag_module_refresh(self, tag, data):
        '''
        Handle a module_refresh event
        '''
        self.module_refresh(
            force_refresh=data.get('force_refresh', False),
            notify=data.get('notify', False)
        )

    @tornado.gen.coroutine
    def _handle_tag_pillar_refresh(self, tag, data):
        '''
        Handle a pillar_refresh event
        '''
        yield self.pillar_refresh(
            force_refresh=data.get('force_refresh', False),
            notify=data.get('notify', False)
        )

    def _handle_tag_beacons_refresh(self, tag, data):
        '''
        Handle a beacon_refresh event
        '''
        self.beacons_refresh()

    def _handle_tag_matchers_refresh(self, tag, data):
        '''
        Handle a matchers_refresh event
        '''
        self.matchers_refresh()

    def _handle_tag_manage_schedule(self, tag, data):
        '''
        Handle a manage_schedule event
        '''
        self.manage_schedule(tag, data)

    def _handle_tag_manage_beacons(self, tag, data):
        '''
        Handle a manage_beacons event
        '''
        self.manage_beacons(tag, data)

    def _handle_tag_grains_refresh(self, tag, data):
        '''
        Handle a grains_refresh event
        '''
        # Only refresh when forced or when the grains actually changed.
        if (data.get('force_refresh', False) or
                self.grains_cache != self.opts['grains']):
            self.pillar_refresh(force_refresh=True)
            self.grains_cache = self.opts['grains']

    def _handle_tag_environ_setenv(self, tag, data):
        '''
        Handle a environ_setenv event
        '''
        self.environ_setenv(tag, data)

    def _handle_tag_minion_mine(self, tag, data):
        '''
        Handle a _minion_mine event
        '''
        self._mine_send(tag, data)

    def _handle_tag_fire_master(self, tag, data):
        '''
        Handle a fire_master event
        '''
        if self.connected:
            log.debug('Forwarding master event tag=%s', data['tag'])
            self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])

    # NOTE(review): this method uses `yield` but no @tornado.gen.coroutine
    # decorator is visible here — confirm the dispatch site treats it as a
    # coroutine (generator) correctly.
    def _handle_tag_master_disconnected_failback(self, tag, data):
        '''
        Handle a master_disconnected_failback event
        '''
        # if the master disconnect event is for a different master, raise an exception
        if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']:
            # not mine master, ignore
            return
        if tag.startswith(master_event(type='failback')):
            # if the master failback event is not for the top master, raise an exception
            if data['master'] != self.opts['master_list'][0]:
                raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format(
                    data['master'], self.opts['master']))
            # if the master failback event is for the current master, raise an exception
            # NOTE(review): self.opts['master'][0] indexes the first character
            # when 'master' is a string — looks like it should compare against
            # self.opts['master'] or self.opts['master_list'][0]; confirm.
            elif data['master'] == self.opts['master'][0]:
                raise SaltException('Already connected to \'{0}\''.format(data['master']))

        if self.connected:
            # we are not connected anymore
            self.connected = False
            log.info('Connection to master %s lost', self.opts['master'])

            # we can't use the config default here because the default '0' value is overloaded
            # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up
            # these jobs
            master_alive_interval = self.opts['master_alive_interval'] or 60

            if self.opts['master_type'] != 'failover':
                # modify the scheduled job to fire on reconnect
                if self.opts['transport'] != 'tcp':
                    schedule = {
                        'function': 'status.master',
                        'seconds': master_alive_interval,
                        'jid_include': True,
                        'maxrunning': 1,
                        'return_job': False,
                        'kwargs': {'master': self.opts['master'],
                                   'connected': False}
                    }
                    self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
                                             schedule=schedule)
            else:
                # delete the scheduled job to don't interfere with the failover process
                if self.opts['transport'] != 'tcp':
                    self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']),
                                             persist=True)

                log.info('Trying to tune in to next master from master-list')

                if hasattr(self, 'pub_channel'):
                    # Fully tear down the old publish channel before failover.
                    self.pub_channel.on_recv(None)
                    if hasattr(self.pub_channel, 'auth'):
                        self.pub_channel.auth.invalidate()
                    if hasattr(self.pub_channel, 'close'):
                        self.pub_channel.close()
                    del self.pub_channel

                # if eval_master finds a new master for us, self.connected
                # will be True again on successful master authentication
                try:
                    master, self.pub_channel = yield self.eval_master(
                        opts=self.opts,
                        failed=True,
                        failback=tag.startswith(master_event(type='failback')))
                except SaltClientError:
                    pass

                if self.connected:
                    self.opts['master'] = master

                    # re-init the subsystems to work with the new master
                    log.info(
                        'Re-initialising subsystems for new master %s',
                        self.opts['master']
                    )
                    # put the current schedule into the new loaders
                    self.opts['schedule'] = self.schedule.option('schedule')
                    self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
                    # make the schedule to use the new 'functions' loader
                    self.schedule.functions = self.functions
                    self.pub_channel.on_recv(self._handle_payload)
                    self._fire_master_minion_start()
                    log.info('Minion is ready to receive requests!')

                    # update scheduled job to run with the new master addr
                    if self.opts['transport'] != 'tcp':
                        schedule = {
                            'function': 'status.master',
                            'seconds': master_alive_interval,
                            'jid_include': True,
                            'maxrunning': 1,
                            'return_job': False,
                            'kwargs': {'master': self.opts['master'],
                                       'connected': True}
                        }
                        self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
                                                 schedule=schedule)

                        if self.opts['master_failback'] and 'master_list' in self.opts:
                            if self.opts['master'] != self.opts['master_list'][0]:
                                # Not on the top-priority master: keep pinging
                                # it so we can fail back when it returns.
                                schedule = {
                                    'function': 'status.ping_master',
                                    'seconds': self.opts['master_failback_interval'],
                                    'jid_include': True,
                                    'maxrunning': 1,
                                    'return_job': False,
                                    'kwargs': {'master': self.opts['master_list'][0]}
                                }
                                self.schedule.modify_job(name=master_event(type='failback'),
                                                         schedule=schedule)
                            else:
                                self.schedule.delete_job(name=master_event(type='failback'),
                                                         persist=True)
                else:
                    # No master could be reached: stop the loop and restart.
                    self.restart = True
                    self.io_loop.stop()

    def _handle_tag_master_connected(self, tag, data):
        '''
        Handle a master_connected event
        '''
        # handle this event only once. otherwise it will pollute the log
        # also if master type is failover all the reconnection work is done
        # by `disconnected` event handler and this event must never happen,
        # anyway check it to be sure
        if not self.connected and self.opts['master_type'] != 'failover':
            log.info('Connection to master %s re-established', self.opts['master'])
            self.connected = True
            # modify the __master_alive job to only fire,
            # if the connection is lost again
            if self.opts['transport'] != 'tcp':
                if self.opts['master_alive_interval'] > 0:
                    schedule = {
                        'function': 'status.master',
                        'seconds': self.opts['master_alive_interval'],
                        'jid_include': True,
                        'maxrunning': 1,
                        'return_job': False,
                        'kwargs': {'master': self.opts['master'],
                                   'connected': True}
                    }
                    self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
                                             schedule=schedule)
                else:
                    self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']),
                                             persist=True)

    def _handle_tag_schedule_return(self, tag, data):
        '''
        Handle a _schedule_return event
        '''
        # reporting current connection with master
        if data['schedule'].startswith(master_event(type='alive', master='')):
            if data['return']:
                log.debug(
                    'Connected to master %s',
                    data['schedule'].split(master_event(type='alive', master=''))[1]
                )
        self._return_pub(data, ret_cmd='_return', sync=False)

    def _handle_tag_salt_error(self, tag, data):
        '''
        Handle a _salt_error event
        '''
        if self.connected:
            log.debug('Forwarding salt error event tag=%s', tag)
            self._fire_master(data, tag)

    def _handle_tag_salt_auth_creds(self, tag, data):
        '''
        Handle a salt_auth_creds event
        '''
        key = tuple(data['key'])
        log.debug(
            'Updating auth data for %s: %s -> %s',
            key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds']
        )
        salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds']

    @tornado.gen.coroutine
    def handle_event(self, package):
'''
        Handle an event from the epull_sock (all local minion events)
        '''
        if not self.ready:
            # Core attributes are not initialized yet; ignore events.
            raise tornado.gen.Return()
        tag, data = salt.utils.event.SaltEvent.unpack(package)
        log.debug(
            'Minion of \'%s\' is handling event tag \'%s\'',
            self.opts['master'], tag
        )

        # Prefix-based dispatch table: the handler of every matching prefix
        # is invoked (see the loop below).
        tag_functions = {
            'beacons_refresh': self._handle_tag_beacons_refresh,
            'environ_setenv': self._handle_tag_environ_setenv,
            'fire_master': self._handle_tag_fire_master,
            'grains_refresh': self._handle_tag_grains_refresh,
            'matchers_refresh': self._handle_tag_matchers_refresh,
            'manage_schedule': self._handle_tag_manage_schedule,
            'manage_beacons': self._handle_tag_manage_beacons,
            '_minion_mine': self._handle_tag_minion_mine,
            'module_refresh': self._handle_tag_module_refresh,
            'pillar_refresh': self._handle_tag_pillar_refresh,
            'salt/auth/creds': self._handle_tag_salt_auth_creds,
            '_salt_error': self._handle_tag_salt_error,
            '__schedule_return': self._handle_tag_schedule_return,
            master_event(type='disconnected'): self._handle_tag_master_disconnected_failback,
            master_event(type='failback'): self._handle_tag_master_disconnected_failback,
            master_event(type='connected'): self._handle_tag_master_connected,
        }

        # Run the appropriate function
        for tag_function in tag_functions:
            if tag.startswith(tag_function):
                tag_functions[tag_function](tag, data)

    def _fallback_cleanups(self):
        '''
        Fallback cleanup routines, attempting to fix leaked processes, threads, etc.
        '''
        # Add an extra fallback in case a forked process leaks through
        multiprocessing.active_children()

        # Cleanup Windows threads
        if not salt.utils.platform.is_windows():
            return
        for thread in self.win_proc:
            if not thread.is_alive():
                thread.join()
                try:
                    self.win_proc.remove(thread)
                    del thread
                except (ValueError, NameError):
                    pass

    def _setup_core(self):
        '''
        Set up the core minion attributes.
        This is safe to call multiple times.
        '''
        if not self.ready:
            # First call. Initialize.
            self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
            self.serial = salt.payload.Serial(self.opts)
            self.mod_opts = self._prep_mod_opts()
            # self.matcher = Matcher(self.opts, self.functions)
            self.matchers = salt.loader.matchers(self.opts)
            self.beacons = salt.beacons.Beacon(self.opts, self.functions)
            uid = salt.utils.user.get_uid(user=self.opts.get('user', None))
            self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
            self.grains_cache = self.opts['grains']
            self.ready = True

    def setup_beacons(self, before_connect=False):
        '''
        Set up the beacons.
        This is safe to call multiple times.
        '''
        self._setup_core()

        loop_interval = self.opts['loop_interval']
        new_periodic_callbacks = {}

        if 'beacons' not in self.periodic_callbacks:
            self.beacons = salt.beacons.Beacon(self.opts, self.functions)

            def handle_beacons():
                # Process Beacons
                beacons = None
                try:
                    beacons = self.process_beacons(self.functions)
                except Exception:
                    log.critical('The beacon errored: ', exc_info=True)
                if beacons and self.connected:
                    self._fire_master(events=beacons)

            new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(
                handle_beacons, loop_interval * 1000)
            if before_connect:
                # Make sure there is a chance for one iteration to occur before connect
                handle_beacons()

        if 'cleanup' not in self.periodic_callbacks:
            new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(
                self._fallback_cleanups, loop_interval * 1000)

        # start all the other callbacks
        for periodic_cb in six.itervalues(new_periodic_callbacks):
            periodic_cb.start()

        self.periodic_callbacks.update(new_periodic_callbacks)

    def setup_scheduler(self, before_connect=False):
        '''
        Set up the scheduler.
        This is safe to call multiple times.
        '''
        self._setup_core()

        loop_interval = self.opts['loop_interval']
        new_periodic_callbacks = {}

        if 'schedule' not in self.periodic_callbacks:
            if 'schedule' not in self.opts:
                self.opts['schedule'] = {}
            if not hasattr(self, 'schedule'):
                self.schedule = salt.utils.schedule.Schedule(
                    self.opts,
                    self.functions,
                    self.returners,
                    utils=self.utils,
                    cleanup=[master_event(type='alive')])

            try:
                if self.opts['grains_refresh_every']:  # In minutes, not seconds!
                    log.debug(
                        'Enabling the grains refresher. Will run every %d minute(s).',
                        self.opts['grains_refresh_every']
                    )
                    self._refresh_grains_watcher(abs(self.opts['grains_refresh_every']))
            except Exception as exc:
                log.error(
                    'Exception occurred in attempt to initialize grain refresh '
                    'routine during minion tune-in: %s', exc
                )

            # TODO: actually listen to the return and change period
            def handle_schedule():
                self.process_schedule(self, loop_interval)
            new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000)

            if before_connect:
                # Make sure there is a chance for one iteration to occur before connect
                handle_schedule()

        if 'cleanup' not in self.periodic_callbacks:
            new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(
                self._fallback_cleanups, loop_interval * 1000)

        # start all the other callbacks
        for periodic_cb in six.itervalues(new_periodic_callbacks):
            periodic_cb.start()

        self.periodic_callbacks.update(new_periodic_callbacks)

    # Main Minion Tune In
    def tune_in(self, start=True):
        '''
        Lock onto the publisher.
This is the main event loop for the minion

        :rtype : None
        '''
        self._pre_tune()

        log.debug('Minion \'%s\' trying to tune in', self.opts['id'])

        if start:
            # Optionally run beacons/scheduler once before connecting.
            if self.opts.get('beacons_before_connect', False):
                self.setup_beacons(before_connect=True)
            if self.opts.get('scheduler_before_connect', False):
                self.setup_scheduler(before_connect=True)
            self.sync_connect_master()
        if self.connected:
            self._fire_master_minion_start()
            log.info('Minion is ready to receive requests!')

        # Make sure to gracefully handle SIGUSR1
        enable_sigusr1_handler()

        # Make sure to gracefully handle CTRL_LOGOFF_EVENT
        if HAS_WIN_FUNCTIONS:
            salt.utils.win_functions.enable_ctrl_logoff_handler()

        # On first startup execute a state run if configured to do so
        self._state_run()

        self.setup_beacons()
        self.setup_scheduler()

        # schedule the stuff that runs every interval
        ping_interval = self.opts.get('ping_interval', 0) * 60
        if ping_interval > 0 and self.connected:
            def ping_master():
                try:
                    def ping_timeout_handler(*_):
                        if self.opts.get('auth_safemode', False):
                            log.error('** Master Ping failed. Attempting to restart minion**')
                            delay = self.opts.get('random_reauth_delay', 5)
                            log.info('delaying random_reauth_delay %ss', delay)
                            try:
                                self.functions['service.restart'](service_name())
                            except KeyError:
                                # Probably no init system (running in docker?)
                                log.warning(
                                    'ping_interval reached without response '
                                    'from the master, but service.restart '
                                    'could not be run to restart the minion '
                                    'daemon. ping_interval requires that the '
                                    'minion is running under an init system.'
                                )

                    self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler)
                except Exception:
                    # NOTE(review): 'exc_on_loglevel' looks like a typo for
                    # 'exc_info_on_loglevel' (used elsewhere in this class) —
                    # confirm against the logging shim before changing.
                    log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG)
            self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000)
            self.periodic_callbacks['ping'].start()

        # add handler to subscriber
        if hasattr(self, 'pub_channel') and self.pub_channel is not None:
            self.pub_channel.on_recv(self._handle_payload)
        elif self.opts.get('master_type') != 'disable':
            log.error('No connection to master found. Scheduled jobs will not run.')

        if start:
            try:
                self.io_loop.start()
                if self.restart:
                    self.destroy()
            except (KeyboardInterrupt, RuntimeError):
                # A RuntimeError can be re-raised by Tornado on shutdown
                self.destroy()

    def _handle_payload(self, payload):
        '''
        Entry point for publications received from the master's publish
        channel; only AES-encrypted payloads that target this minion are
        executed.
        '''
        if payload is not None and payload['enc'] == 'aes':
            if self._target_load(payload['load']):
                self._handle_decoded_payload(payload['load'])
            elif self.opts['zmq_filtering']:
                # In the filtering enabled case, we'd like to know when minion sees something it shouldnt
                log.trace(
                    'Broadcast message received not for this minion, Load: %s',
                    payload['load']
                )
        # If it's not AES, and thus has not been verified, we do nothing.
        # In the future, we could add support for some clearfuncs, but
        # the minion currently has no need.

    def _target_load(self, load):
        '''
        Return True when the publication ``load`` is well-formed and this
        minion matches its target expression; False otherwise.
        '''
        # Verify that the publication is valid
        if 'tgt' not in load or 'jid' not in load or 'fun' not in load \
                or 'arg' not in load:
            return False
        # Verify that the publication applies to this minion

        # It's important to note that the master does some pre-processing
        # to determine which minions to send a request to. So for example,
        # a "salt -G 'grain_key:grain_val' test.ping" will invoke some
        # pre-processing on the master and this minion should not see the
        # publication if the master does not determine that it should.
        if 'tgt_type' in load:
            match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None)
            if match_func is None:
                return False
            if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
                # These matchers take the key/value delimiter as an argument.
                delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM)
                if not match_func(load['tgt'], delimiter=delimiter):
                    return False
            elif not match_func(load['tgt']):
                return False
        else:
            # No tgt_type given: default to glob matching.
            if not self.matchers['glob_match.match'](load['tgt']):
                return False

        return True

    def destroy(self):
        '''
        Tear down the minion
        '''
        if self._running is False:
            return
        self._running = False

        if hasattr(self, 'schedule'):
            del self.schedule
        if hasattr(self, 'pub_channel') and self.pub_channel is not None:
            self.pub_channel.on_recv(None)
            if hasattr(self.pub_channel, 'close'):
                self.pub_channel.close()
            del self.pub_channel
        if hasattr(self, 'periodic_callbacks'):
            for cb in six.itervalues(self.periodic_callbacks):
                cb.stop()

    def __del__(self):
        self.destroy()
saltstack/salt
salt/minion.py
Minion._prep_mod_opts
python
def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts
Returns a copy of the opts with key bits stripped out
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L1348-L1357
null
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, 
self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' 
Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! 
        # Restore the pre-import address-space limit now that module
        # loading is finished (we only raised RLIMIT_AS temporarily above).
        if modules_max_memory is True:
            resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)

        executors = salt.loader.executors(opts, functions, proxy=proxy)

        if opt_in:
            self.opts = opts
        return functions, returners, errors, executors

    def _send_req_sync(self, load, timeout):
        '''
        Send ``load`` to the master over a synchronous request channel
        and return the master's reply.

        If ``minion_sign_messages`` is enabled, the msgpack-serialized
        payload is signed with the minion's private key and the signature
        is attached to the load under the ``sig`` key before sending.
        '''
        if self.opts['minion_sign_messages']:
            log.trace('Signing event to be published onto the bus.')
            minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
            sig = salt.crypt.sign_message(minion_privkey_path,
                                          salt.serializers.msgpack.serialize(load))
            load['sig'] = sig

        channel = salt.transport.client.ReqChannel.factory(self.opts)
        try:
            return channel.send(load, timeout=timeout)
        finally:
            # Always release the channel, even if send() raises.
            channel.close()

    @tornado.gen.coroutine
    def _send_req_async(self, load, timeout):
        '''
        Asynchronous counterpart of ``_send_req_sync``: send ``load`` to
        the master over an async request channel and return the reply via
        ``tornado.gen.Return``. Signs the payload the same way when
        ``minion_sign_messages`` is enabled.
        '''
        if self.opts['minion_sign_messages']:
            log.trace('Signing event to be published onto the bus.')
            minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
            sig = salt.crypt.sign_message(minion_privkey_path,
                                          salt.serializers.msgpack.serialize(load))
            load['sig'] = sig

        channel = salt.transport.client.AsyncReqChannel.factory(self.opts)
        try:
            ret = yield channel.send(load, timeout=timeout)
            raise tornado.gen.Return(ret)
        finally:
            channel.close()

    def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60,
                     sync=True, timeout_handler=None):
        '''
        Fire an event on the master, or drop message if unable to send.

        Exactly one of the following payload forms is sent:
        a list of ``events``, a ``data``/``tag`` pair, or an empty data
        dict with just a ``tag``. If none of these is supplied the call
        is a no-op (returns None).
        '''
        load = {'id': self.opts['id'],
                'cmd': '_minion_event',
                'pretag': pretag,
                'tok': self.tok}
        if events:
            load['events'] = events
        elif data and tag:
            load['data'] = data
            load['tag'] = tag
        elif not data and tag:
            load['data'] = {}
            load['tag'] = tag
        else:
            # Nothing to send
            return

        if sync:
            try:
                self._send_req_sync(load, timeout)
            except salt.exceptions.SaltReqTimeoutError:
                log.info('fire_master failed: master could not be contacted. Request timed out.')
                # very likely one of the masters is dead, status.master will flush it
                self.functions['status.master'](self.opts['master'])
                return False
            except Exception:
                # Best-effort delivery: log and report failure, never raise
                log.info('fire_master failed: %s', traceback.format_exc())
                return False
        else:
            if timeout_handler is None:
                def handle_timeout(*_):
                    log.info('fire_master failed: master could not be contacted. Request timed out.')
                    # very likely one of the masters is dead, status.master will flush it
                    self.functions['status.master'](self.opts['master'])
                    return True
                timeout_handler = handle_timeout

            with tornado.stack_context.ExceptionStackContext(timeout_handler):
                self._send_req_async(load, timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg
        return True

    @tornado.gen.coroutine
    def _handle_decoded_payload(self, data):
        '''
        Override this method if you wish to handle the decoded data
        differently.

        Dispatches a decoded job payload: de-duplicates by jid, reloads
        modules on ``sys.reload_modules``, optionally throttles on
        ``process_count_max``, then launches the job in a subprocess or
        thread depending on the ``multiprocessing`` option.
        '''
        # Ensure payload is unicode. Disregard failure to decode binary blobs.
        if six.PY2:
            data = salt.utils.data.decode(data, keep=True)
        if 'user' in data:
            log.info(
                'User %s Executing command %s with jid %s',
                data['user'], data['fun'], data['jid']
            )
        else:
            log.info(
                'Executing command %s with jid %s',
                data['fun'], data['jid']
            )
        log.debug('Command details %s', data)

        # Don't duplicate jobs
        log.trace('Started JIDs: %s', self.jid_queue)
        if self.jid_queue is not None:
            if data['jid'] in self.jid_queue:
                # Already seen this jid; drop the duplicate publish
                return
            else:
                self.jid_queue.append(data['jid'])
                # Bound the de-dup queue at the configured high-water mark
                if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']:
                    self.jid_queue.pop(0)

        if isinstance(data['fun'], six.string_types):
            if data['fun'] == 'sys.reload_modules':
                self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
                self.schedule.functions = self.functions
                self.schedule.returners = self.returners

        process_count_max = self.opts.get('process_count_max')
        process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs')
        if process_count_max > 0:
            # Throttle: wait until the number of running jobs drops below
            # the configured maximum before starting this one.
            process_count = len(salt.utils.minion.running(self.opts))
            while process_count >= process_count_max:
                log.warning('Maximum number of processes (%s) reached while '
                            'executing jid %s, waiting %s seconds...',
                            process_count_max, data['jid'], process_count_max_sleep_secs)
                yield tornado.gen.sleep(process_count_max_sleep_secs)
                process_count = len(salt.utils.minion.running(self.opts))

        # We stash an instance references to allow for the socket
        # communication in Windows. You can't pickle functions, and thus
        # python needs to be able to reconstruct the reference on the other
        # side.
        instance = self
        multiprocessing_enabled = self.opts.get('multiprocessing', True)
        if multiprocessing_enabled:
            if sys.platform.startswith('win'):
                # let python reconstruct the minion on the other side if we're
                # running on windows
                instance = None
            with default_signals(signal.SIGINT, signal.SIGTERM):
                process = SignalHandlingMultiprocessingProcess(
                    target=self._target,
                    args=(instance, self.opts, data, self.connected)
                )
        else:
            process = threading.Thread(
                target=self._target,
                args=(instance, self.opts, data, self.connected),
                name=data['jid']
            )

        if multiprocessing_enabled:
            with default_signals(signal.SIGINT, signal.SIGTERM):
                # Reset current signals before starting the process in
                # order not to inherit the current signal handlers
                process.start()
        else:
            process.start()

        # TODO: remove the windows specific check?
if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading 
target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. 
Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
try: func_result = all(return_data.get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret['retcode'] = retcode ret['success'] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = 'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( 'A command in \'%s\' had a problem: %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( 'Problem executing \'%s\': %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing \'{0}\': {1}'.format( function_name, exc ) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format( function_name, exc, func.__doc__ or '' ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string(function_name) ret['return'] = docs else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = 
function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: \'{0}\''.format( minion_instance.function_errors[mod_name] ) ret['success'] = False ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. Ignoring.') if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) # Add default returners from minion config # Should have been coverted to comma-delimited string already if isinstance(opts.get('return'), six.string_types): if data['ret']: data['ret'] = ','.join((data['ret'], opts['return'])) else: data['ret'] = opts['return'] log.debug('minion return: %s', ret) # TODO: make a list? Seems odd to split it this late :/ if data['ret'] and isinstance(data['ret'], six.string_types): if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: returner_str = '{0}.returner'.format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string(returner_str) log.error( 'Returner %s could not be loaded: %s', returner_str, returner_err ) except Exception as exc: log.exception( 'The return failed for job %s: %s', data['jid'], exc ) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. 
''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID %s', sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) multifunc_ordered = opts.get('multifunc_ordered', False) num_funcs = len(data['fun']) if multifunc_ordered: ret = { 'return': [None] * num_funcs, 'retcode': [None] * num_funcs, 'success': [False] * num_funcs } else: ret = { 'return': {}, 'retcode': {}, 'success': {} } for ind in range(0, num_funcs): if not multifunc_ordered: ret['success'][data['fun'][ind]] = False try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True elif minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. 
Only ' 'saltutil.refresh_pillar allowed in blackout mode.') func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) minion_instance.functions.pack['__context__']['retcode'] = 0 key = ind if multifunc_ordered else data['fun'][ind] ret['return'][key] = func(*args, **kwargs) retcode = minion_instance.functions.pack['__context__'].get( 'retcode', 0 ) if retcode == 0: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. try: func_result = all(ret['return'][key].get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = 1 ret['retcode'][key] = retcode ret['success'][key] = retcode == 0 except Exception as exc: trb = traceback.format_exc() log.warning('The minion function caused an exception: %s', exc) if multifunc_ordered: ret['return'][ind] = trb else: ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job %s: %s', data['jid'], exc ) def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file 
is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled if ret['jid'] == 'req': ret['jid'] = salt.utils.jid.gen_jid(self.opts) salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) if not self.opts['pub_ret']: return '' def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. 
If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' if not isinstance(rets, list): rets = [rets] jids = {} for ret in rets: jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) load = jids.setdefault(jid, {}) if ret_cmd == '_syndic_return': if not load: load.update({'id': self.opts['id'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__'), 'return': {}}) if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load.update({'id': self.opts['id']}) for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. 
This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled salt.utils.minion.cache_jobs(self.opts, load['jid'], ret) load = {'cmd': ret_cmd, 'load': list(six.itervalues(jids))} def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' 
) else: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data) def _refresh_grains_watcher(self, refresh_interval_in_minutes): ''' Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: :return: None ''' if '__update_grains' not in self.opts.get('schedule', {}): if 'schedule' not in self.opts: self.opts['schedule'] = {} self.opts['schedule'].update({ '__update_grains': { 'function': 'event.fire', 'args': [{}, 'grains_refresh'], 'minutes': refresh_interval_in_minutes } }) def _fire_master_minion_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to False in Sodium release. self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'minion_start' ) # send name spaced event self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'minion'), ) def module_refresh(self, force_refresh=False, notify=False): ''' Refresh the functions and returners. ''' log.debug('Refreshing modules. Notify=%s', notify) self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify) self.schedule.functions = self.functions self.schedule.returners = self.returners def beacons_refresh(self): ''' Refresh the functions and returners. 
        '''
        log.debug('Refreshing beacons.')
        self.beacons = salt.beacons.Beacon(self.opts, self.functions)

    def matchers_refresh(self):
        '''
        Refresh the matchers
        '''
        log.debug('Refreshing matchers.')
        self.matchers = salt.loader.matchers(self.opts)

    # TODO: only allow one future in flight at a time?
    @tornado.gen.coroutine
    def pillar_refresh(self, force_refresh=False, notify=False):
        '''
        Refresh the pillar

        Recompiles pillar data from the master (only when connected),
        then refreshes modules, matchers and beacons so they pick up the
        new pillar. A failed pillar compile is logged but does not abort.
        '''
        if self.connected:
            log.debug('Refreshing pillar. Notify: %s', notify)
            async_pillar = salt.pillar.get_async_pillar(
                self.opts,
                self.opts['grains'],
                self.opts['id'],
                self.opts['saltenv'],
                pillarenv=self.opts.get('pillarenv'),
            )
            try:
                self.opts['pillar'] = yield async_pillar.compile_pillar()
                if notify:
                    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
                    evt.fire_event({'complete': True},
                                   tag=salt.defaults.events.MINION_PILLAR_COMPLETE)
            except SaltClientError:
                # Do not exit if a pillar refresh fails.
                log.error('Pillar data could not be refreshed. '
                          'One or more masters may be down!')
            finally:
                async_pillar.destroy()
        self.module_refresh(force_refresh, notify)
        self.matchers_refresh()
        self.beacons_refresh()

    def manage_schedule(self, tag, data):
        '''
        Apply a schedule-management operation (add/delete/modify/enable/
        disable/postpone/skip/reload/list/save/get_next_fire_time) to the
        minion's scheduler, dispatching on ``data['func']``.
        '''
        func = data.get('func', None)
        name = data.get('name', None)
        schedule = data.get('schedule', None)
        where = data.get('where', None)
        persist = data.get('persist', None)

        # Dispatch table: requested op -> (Schedule method name, args)
        funcs = {'delete': ('delete_job', (name, persist)),
                 'add': ('add_job', (schedule, persist)),
                 'modify': ('modify_job', (name, schedule, persist)),
                 'enable': ('enable_schedule', ()),
                 'disable': ('disable_schedule', ()),
                 'enable_job': ('enable_job', (name, persist)),
                 'disable_job': ('disable_job', (name, persist)),
                 'postpone_job': ('postpone_job', (name, data)),
                 'skip_job': ('skip_job', (name, data)),
                 'reload': ('reload', (schedule,)),
                 'list': ('list', (where,)),
                 'save_schedule': ('save_schedule', ()),
                 'get_next_fire_time': ('get_next_fire_time', (name,))}

        # Call the appropriate schedule function
        try:
            # funcs.get(func) is None for unknown ops; the unpack then
            # raises TypeError, which is logged below.
            alias, params = funcs.get(func)
            getattr(self.schedule, alias)(*params)
        except TypeError:
            log.error('Function "%s" is unavailable in salt.utils.scheduler', func)

    def manage_beacons(self, tag, data):
        '''
        Manage Beacons

        Applies a beacon-management operation (add/modify/delete/enable/
        disable/list/validate/reset) to this minion's beacons,
        dispatching on ``data['func']``.
        '''
        func = data.get('func', None)
        name = data.get('name', None)
        beacon_data = data.get('beacon_data', None)
        include_pillar = data.get('include_pillar', None)
        include_opts = data.get('include_opts', None)

        # Dispatch table: requested op -> (Beacon method name, args)
        funcs = {'add': ('add_beacon', (name, beacon_data)),
                 'modify': ('modify_beacon', (name, beacon_data)),
                 'delete': ('delete_beacon', (name,)),
                 'enable': ('enable_beacons', ()),
                 'disable': ('disable_beacons', ()),
                 'enable_beacon': ('enable_beacon', (name,)),
                 'disable_beacon': ('disable_beacon', (name,)),
                 'list': ('list_beacons', (include_opts, include_pillar)),
                 'list_available': ('list_available_beacons', ()),
                 'validate_beacon': ('validate_beacon', (name, beacon_data)),
                 'reset': ('reset', ())}

        # Call the appropriate beacon function
        try:
            alias, params = funcs.get(func)
            getattr(self.beacons, alias)(*params)
        except AttributeError:
            log.error('Function "%s" is unavailable in salt.beacons', func)
        except TypeError as exc:
            log.info(
                'Failed to handle %s with data(%s). Error: %s',
                tag, data, exc,
                exc_info_on_loglevel=logging.DEBUG
            )

    def environ_setenv(self, tag, data):
        '''
        Set the salt-minion main process environment according to
        the data contained in the minion event data
        '''
        environ = data.get('environ', None)
        if environ is None:
            return False
        false_unsets = data.get('false_unsets', False)
        clear_all = data.get('clear_all', False)
        import salt.modules.environ as mod_environ
        return mod_environ.setenv(environ, false_unsets, clear_all)

    def _pre_tune(self):
        '''
        Set the minion running flag and issue the appropriate warnings if
        the minion cannot be started or is already running
        '''
        if self._running is None:
            self._running = True
        elif self._running is False:
            log.error(
                'This %s was scheduled to stop. Not running %s.tune_in()',
                self.__class__.__name__, self.__class__.__name__
            )
            return
        elif self._running is True:
            log.error(
                'This %s is already running. Not running %s.tune_in()',
                self.__class__.__name__, self.__class__.__name__
            )
            return

        try:
            log.info(
                '%s is starting as user \'%s\'',
                self.__class__.__name__, salt.utils.user.get_user()
            )
        except Exception as err:
            # Only windows is allowed to fail here. See #3189. Log as debug in
            # that case. Else, error.
            log.log(
                salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR,
                'Failed to get the user who is starting %s',
                self.__class__.__name__,
                exc_info=err
            )

    def _mine_send(self, tag, data):
        '''
        Send mine data to the master

        Returns the master's reply, or None when the request times out.
        '''
        channel = salt.transport.client.ReqChannel.factory(self.opts)
        data['tok'] = self.tok
        try:
            ret = channel.send(data)
            return ret
        except SaltReqTimeoutError:
            log.warning('Unable to send mine data to master.')
            return None
        finally:
            # Always release the channel, success or timeout
            channel.close()

    def _handle_tag_module_refresh(self, tag, data):
        '''
        Handle a module_refresh event
        '''
        self.module_refresh(
            force_refresh=data.get('force_refresh', False),
            notify=data.get('notify', False)
        )

    @tornado.gen.coroutine
    def _handle_tag_pillar_refresh(self, tag, data):
        '''
        Handle a pillar_refresh event
        '''
        yield self.pillar_refresh(
            force_refresh=data.get('force_refresh', False),
            notify=data.get('notify', False)
        )

    def _handle_tag_beacons_refresh(self, tag, data):
        '''
        Handle a beacon_refresh event
        '''
        self.beacons_refresh()

    def _handle_tag_matchers_refresh(self, tag, data):
        '''
        Handle a matchers_refresh event
        '''
        self.matchers_refresh()

    def _handle_tag_manage_schedule(self, tag, data):
        '''
        Handle a manage_schedule event
        '''
        self.manage_schedule(tag, data)

    def _handle_tag_manage_beacons(self, tag, data):
        '''
        Handle a manage_beacons event
        '''
        self.manage_beacons(tag, data)

    def _handle_tag_grains_refresh(self, tag, data):
        '''
        Handle a grains_refresh event

        Refreshes pillar (which also refreshes modules/beacons/matchers)
        when a refresh is forced or the grains actually changed.
        '''
        if (data.get('force_refresh', False) or
                self.grains_cache != self.opts['grains']):
            self.pillar_refresh(force_refresh=True)
            self.grains_cache = self.opts['grains']

    def _handle_tag_environ_setenv(self, tag, data):
        '''
        Handle a environ_setenv event
        '''
        self.environ_setenv(tag, data)

    def _handle_tag_minion_mine(self, tag, data):
        '''
        Handle a _minion_mine event
        '''
        self._mine_send(tag, data)

    def _handle_tag_fire_master(self, tag, data):
        '''
        Handle a fire_master event

        Forwards a locally fired event up to the master, but only while
        connected.
        '''
        if self.connected:
            log.debug('Forwarding master event tag=%s', data['tag'])
'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = 
True self.io_loop.stop() def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) def _handle_tag_salt_auth_creds(self, tag, data): ''' Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def handle_event(self, package): 
''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. ''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion._load_modules
python
def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! 
if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) executors = salt.loader.executors(opts, functions, proxy=proxy) if opt_in: self.opts = opts return functions, returners, errors, executors
Return the functions and the returners loaded up from the loader module
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L1359-L1419
null
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, 
self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' 
mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _send_req_sync(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.ReqChannel.factory(self.opts) try: return channel.send(load, timeout=timeout) finally: channel.close() @tornado.gen.coroutine def _send_req_async(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.AsyncReqChannel.factory(self.opts) try: ret = yield channel.send(load, timeout=timeout) raise tornado.gen.Return(ret) finally: channel.close() def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. ''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return if sync: try: self._send_req_sync(load, timeout) except salt.exceptions.SaltReqTimeoutError: log.info('fire_master failed: master could not be contacted. 
Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return False except Exception: log.info('fire_master failed: %s', traceback.format_exc()) return False else: if timeout_handler is None: def handle_timeout(*_): log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return True timeout_handler = handle_timeout with tornado.stack_context.ExceptionStackContext(timeout_handler): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # Ensure payload is unicode. Disregard failure to decode binary blobs. if six.PY2: data = salt.utils.data.decode(data, keep=True) if 'user' in data: log.info( 'User %s Executing command %s with jid %s', data['user'], data['fun'], data['jid'] ) else: log.info( 'Executing command %s with jid %s', data['fun'], data['jid'] ) log.debug('Command details %s', data) # Don't duplicate jobs log.trace('Started JIDs: %s', self.jid_queue) if self.jid_queue is not None: if data['jid'] in self.jid_queue: return else: self.jid_queue.append(data['jid']) if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']: self.jid_queue.pop(0) if isinstance(data['fun'], six.string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners process_count_max = self.opts.get('process_count_max') process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs') if process_count_max > 0: process_count = len(salt.utils.minion.running(self.opts)) while 
process_count >= process_count_max: log.warning('Maximum number of processes (%s) reached while ' 'executing jid %s, waiting %s seconds...', process_count_max, data['jid'], process_count_max_sleep_secs) yield tornado.gen.sleep(process_count_max_sleep_secs) process_count = len(salt.utils.minion.running(self.opts)) # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. instance = self multiprocessing_enabled = self.opts.get('multiprocessing', True) if multiprocessing_enabled: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None with default_signals(signal.SIGINT, signal.SIGTERM): process = SignalHandlingMultiprocessingProcess( target=self._target, args=(instance, self.opts, data, self.connected) ) else: process = threading.Thread( target=self._target, args=(instance, self.opts, data, self.connected), name=data['jid'] ) if multiprocessing_enabled: with default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers process.start() else: process.start() # TODO: remove the windows specific check? 
if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading 
target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. 
                # …this minion is blacked out: only allow saltutil.refresh_pillar and the whitelist
                if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
                    minion_blackout_violation = True
            # use minion_blackout_whitelist from grains if it exists
            if minion_instance.opts['grains'].get('minion_blackout', False):
                whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
                if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
                    minion_blackout_violation = True
            if minion_blackout_violation:
                raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                          'to False in pillar or grains to resume operations. Only '
                                          'saltutil.refresh_pillar allowed in blackout mode.')

            # Resolve the callable: either a loaded execution-module function,
            # or (allow_missing_funcs path) the bare function name string.
            if function_name in minion_instance.functions:
                func = minion_instance.functions[function_name]
                args, kwargs = load_args_and_kwargs(
                    func,
                    data['arg'],
                    data)
            else:
                # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True
                func = function_name
                args, kwargs = data['arg'], data
            minion_instance.functions.pack['__context__']['retcode'] = 0

            # Normalize the executors spec to a non-empty list of names.
            if isinstance(executors, six.string_types):
                executors = [executors]
            elif not isinstance(executors, list) or not executors:
                raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected".
                                          format(executors))
            if opts.get('sudo_user', '') and executors[-1] != 'sudo':
                executors[-1] = 'sudo'  # replace the last one with sudo
            log.trace('Executors list %s', executors)  # pylint: disable=no-member

            # Try each executor in order; the first one returning non-None wins.
            for name in executors:
                fname = '{0}.execute'.format(name)
                if fname not in minion_instance.executors:
                    raise SaltInvocationError("Executor '{0}' is not available".format(name))
                return_data = minion_instance.executors[fname](opts, data, func, args, kwargs)
                if return_data is not None:
                    break

            # A generator return streams progress events to the master as it yields.
            if isinstance(return_data, types.GeneratorType):
                ind = 0
                iret = {}
                for single in return_data:
                    if isinstance(single, dict) and isinstance(iret, dict):
                        iret.update(single)
                    else:
                        # first non-dict yield switches accumulation to a list
                        if not iret:
                            iret = []
                        iret.append(single)
                    tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job')
                    event_data = {'return': single}
                    minion_instance._fire_master(event_data, tag)
                    ind += 1
                ret['return'] = iret
            else:
                ret['return'] = return_data

            retcode = minion_instance.functions.pack['__context__'].get(
                'retcode',
                salt.defaults.exitcodes.EX_OK
            )
            if retcode == salt.defaults.exitcodes.EX_OK:
                # No nonzero retcode in __context__ dunder. Check if return
                # is a dictionary with a "result" or "success" key.
                try:
                    func_result = all(return_data.get(x, True)
                                      for x in ('result', 'success'))
                except Exception:
                    # return data is not a dict
                    func_result = True
                if not func_result:
                    retcode = salt.defaults.exitcodes.EX_GENERIC

            ret['retcode'] = retcode
            ret['success'] = retcode == salt.defaults.exitcodes.EX_OK
        except CommandNotFoundError as exc:
            msg = 'Command required for \'{0}\' not found'.format(
                function_name
            )
            log.debug(msg, exc_info=True)
            ret['return'] = '{0}: {1}'.format(msg, exc)
            ret['out'] = 'nested'
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        except CommandExecutionError as exc:
            log.error(
                'A command in \'%s\' had a problem: %s',
                function_name, exc,
                exc_info_on_loglevel=logging.DEBUG
            )
            ret['return'] = 'ERROR: {0}'.format(exc)
            ret['out'] = 'nested'
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        except SaltInvocationError as exc:
            log.error(
                'Problem executing \'%s\': %s',
                function_name, exc,
                exc_info_on_loglevel=logging.DEBUG
            )
            ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
                function_name, exc
            )
            ret['out'] = 'nested'
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        except TypeError as exc:
            # Most often bad args; include the function docstring as a hint.
            msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(
                function_name, exc, func.__doc__ or ''
            )
            log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
            ret['return'] = msg
            ret['out'] = 'nested'
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        except Exception:
            msg = 'The minion function caused an exception'
            log.warning(msg, exc_info_on_loglevel=True)
            salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
            ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
            ret['out'] = 'nested'
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        else:
            # Function not loaded (and missing funcs not allowed): report why.
            docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name))
            if docs:
                docs[function_name] = minion_instance.functions.missing_fun_string(function_name)
                ret['return'] = docs
            else:
                ret['return'] = minion_instance.functions.missing_fun_string(function_name)
                mod_name = function_name.split('.')[0]
                if mod_name in minion_instance.function_errors:
                    ret['return'] += ' Possible reasons: \'{0}\''.format(
                        minion_instance.function_errors[mod_name]
                    )
            ret['success'] = False
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            ret['out'] = 'nested'

        ret['jid'] = data['jid']
        ret['fun'] = data['fun']
        ret['fun_args'] = data['arg']
        if 'master_id' in data:
            ret['master_id'] = data['master_id']
        if 'metadata' in data:
            if isinstance(data['metadata'], dict):
                ret['metadata'] = data['metadata']
            else:
                log.warning('The metadata parameter must be a dictionary. Ignoring.')
        if minion_instance.connected:
            minion_instance._return_pub(
                ret,
                timeout=minion_instance._return_retry_timer()
            )

        # Add default returners from minion config
        # Should have been converted to comma-delimited string already
        if isinstance(opts.get('return'), six.string_types):
            if data['ret']:
                data['ret'] = ','.join((data['ret'], opts['return']))
            else:
                data['ret'] = opts['return']

        log.debug('minion return: %s', ret)
        # TODO: make a list? Seems odd to split it this late :/
        if data['ret'] and isinstance(data['ret'], six.string_types):
            if 'ret_config' in data:
                ret['ret_config'] = data['ret_config']
            if 'ret_kwargs' in data:
                ret['ret_kwargs'] = data['ret_kwargs']
            ret['id'] = opts['id']
            for returner in set(data['ret'].split(',')):
                try:
                    returner_str = '{0}.returner'.format(returner)
                    if returner_str in minion_instance.returners:
                        minion_instance.returners[returner_str](ret)
                    else:
                        returner_err = minion_instance.returners.missing_fun_string(returner_str)
                        log.error(
                            'Returner %s could not be loaded: %s',
                            returner_str, returner_err
                        )
                except Exception as exc:
                    log.exception(
                        'The return failed for job %s: %s', data['jid'], exc
                    )

    @classmethod
    def _thread_multi_return(cls, minion_instance, opts, data):
        '''
        This method should be used as a threading target, start the actual
        minion side execution.
        '''
        # Record this job's pid under proc_dir so saltutil.running can see it.
        fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
        if opts['multiprocessing'] and not salt.utils.platform.is_windows():
            # Shutdown the multiprocessing before daemonizing
            salt.log.setup.shutdown_multiprocessing_logging()

            salt.utils.process.daemonize_if(opts)

            # Reconfigure multiprocessing logging after daemonizing
            salt.log.setup.setup_multiprocessing_logging()

        salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))

        sdata = {'pid': os.getpid()}
        sdata.update(data)
        log.info('Starting a new job with PID %s', sdata['pid'])
        with salt.utils.files.fopen(fn_, 'w+b') as fp_:
            fp_.write(minion_instance.serial.dumps(sdata))

        # multifunc_ordered keys results by positional index (lists);
        # otherwise results are keyed by function name (dicts).
        multifunc_ordered = opts.get('multifunc_ordered', False)
        num_funcs = len(data['fun'])
        if multifunc_ordered:
            ret = {
                'return': [None] * num_funcs,
                'retcode': [None] * num_funcs,
                'success': [False] * num_funcs
            }
        else:
            ret = {
                'return': {},
                'retcode': {},
                'success': {}
            }

        for ind in range(0, num_funcs):
            if not multifunc_ordered:
                ret['success'][data['fun'][ind]] = False
            try:
                minion_blackout_violation = False
                if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
                    whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
                    # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                    if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
                        minion_blackout_violation = True
                elif minion_instance.opts['grains'].get('minion_blackout', False):
                    whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
                    if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
                        minion_blackout_violation = True
                if minion_blackout_violation:
                    raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                              'to False in pillar or grains to resume operations. Only '
                                              'saltutil.refresh_pillar allowed in blackout mode.')

                func = minion_instance.functions[data['fun'][ind]]

                args, kwargs = load_args_and_kwargs(
                    func,
                    data['arg'][ind],
                    data)
                minion_instance.functions.pack['__context__']['retcode'] = 0
                key = ind if multifunc_ordered else data['fun'][ind]
                ret['return'][key] = func(*args, **kwargs)
                retcode = minion_instance.functions.pack['__context__'].get(
                    'retcode',
                    0
                )
                if retcode == 0:
                    # No nonzero retcode in __context__ dunder. Check if return
                    # is a dictionary with a "result" or "success" key.
                    try:
                        func_result = all(ret['return'][key].get(x, True)
                                          for x in ('result', 'success'))
                    except Exception:
                        # return data is not a dict
                        func_result = True
                    if not func_result:
                        retcode = 1

                ret['retcode'][key] = retcode
                ret['success'][key] = retcode == 0
            except Exception as exc:
                # Record the traceback as the return for this one function and
                # keep going with the remaining functions.
                trb = traceback.format_exc()
                log.warning('The minion function caused an exception: %s', exc)
                if multifunc_ordered:
                    ret['return'][ind] = trb
                else:
                    ret['return'][data['fun'][ind]] = trb
        ret['jid'] = data['jid']
        ret['fun'] = data['fun']
        ret['fun_args'] = data['arg']
        if 'metadata' in data:
            ret['metadata'] = data['metadata']
        if minion_instance.connected:
            minion_instance._return_pub(
                ret,
                timeout=minion_instance._return_retry_timer()
            )
        if data['ret']:
            if 'ret_config' in data:
                ret['ret_config'] = data['ret_config']
            if 'ret_kwargs' in data:
                ret['ret_kwargs'] = data['ret_kwargs']
            for returner in set(data['ret'].split(',')):
                ret['id'] = opts['id']
                try:
                    minion_instance.returners['{0}.returner'.format(
                        returner
                    )](ret)
                except Exception as exc:
                    log.error(
                        'The return failed for job %s: %s',
                        data['jid'],
                        exc
                    )

    def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True):
        '''
        Return the data from the executed command to the master server
        '''
        jid = ret.get('jid', ret.get('__jid__'))
        fun = ret.get('fun', ret.get('__fun__'))
        if self.opts['multiprocessing']:
            # The forked job process wrote a proc file; clean it up now that
            # the job is finished.
            fn_ = os.path.join(self.proc_dir, jid)
            if os.path.isfile(fn_):
                try:
                    os.remove(fn_)
                except (OSError, IOError):
                    # The file
is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled if ret['jid'] == 'req': ret['jid'] = salt.utils.jid.gen_jid(self.opts) salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) if not self.opts['pub_ret']: return '' def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. 
If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' if not isinstance(rets, list): rets = [rets] jids = {} for ret in rets: jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) load = jids.setdefault(jid, {}) if ret_cmd == '_syndic_return': if not load: load.update({'id': self.opts['id'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__'), 'return': {}}) if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load.update({'id': self.opts['id']}) for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. 
This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled salt.utils.minion.cache_jobs(self.opts, load['jid'], ret) load = {'cmd': ret_cmd, 'load': list(six.itervalues(jids))} def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' 
                )
            else:
                # Build a synthetic job payload and run it through the normal
                # job-handling path.
                data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
                if self.opts['startup_states'] == 'sls':
                    data['fun'] = 'state.sls'
                    data['arg'] = [self.opts['sls_list']]
                elif self.opts['startup_states'] == 'top':
                    data['fun'] = 'state.top'
                    data['arg'] = [self.opts['top_file']]
                else:
                    data['fun'] = 'state.highstate'
                    data['arg'] = []
                self._handle_decoded_payload(data)

    def _refresh_grains_watcher(self, refresh_interval_in_minutes):
        '''
        Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion

        :param refresh_interval_in_minutes: how often (minutes) the schedule fires
        :return: None
        '''
        # Only install the schedule entry once.
        if '__update_grains' not in self.opts.get('schedule', {}):
            if 'schedule' not in self.opts:
                self.opts['schedule'] = {}
            self.opts['schedule'].update({
                '__update_grains':
                    {
                        'function': 'event.fire',
                        'args': [{}, 'grains_refresh'],
                        'minutes': refresh_interval_in_minutes
                    }
            })

    def _fire_master_minion_start(self):
        '''
        Fire the minion start event(s) to the master.
        '''
        # Send an event to the master that the minion is live
        if self.opts['enable_legacy_startup_events']:
            # Old style event. Defaults to False in Sodium release.
            self._fire_master(
                'Minion {0} started at {1}'.format(
                    self.opts['id'],
                    time.asctime()
                ),
                'minion_start'
            )
        # send name spaced event
        self._fire_master(
            'Minion {0} started at {1}'.format(
                self.opts['id'],
                time.asctime()
            ),
            tagify([self.opts['id'], 'start'], 'minion'),
        )

    def module_refresh(self, force_refresh=False, notify=False):
        '''
        Refresh the functions and returners.
        '''
        log.debug('Refreshing modules. Notify=%s', notify)
        self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify)
        # Keep the scheduler's loader references in sync with the new loaders.
        self.schedule.functions = self.functions
        self.schedule.returners = self.returners

    def beacons_refresh(self):
        '''
        Refresh the beacons.
        '''
        log.debug('Refreshing beacons.')
        self.beacons = salt.beacons.Beacon(self.opts, self.functions)

    def matchers_refresh(self):
        '''
        Refresh the matchers
        '''
        log.debug('Refreshing matchers.')
        self.matchers = salt.loader.matchers(self.opts)

    # TODO: only allow one future in flight at a time?
    @tornado.gen.coroutine
    def pillar_refresh(self, force_refresh=False, notify=False):
        '''
        Refresh the pillar
        '''
        if self.connected:
            log.debug('Refreshing pillar. Notify: %s', notify)
            async_pillar = salt.pillar.get_async_pillar(
                self.opts,
                self.opts['grains'],
                self.opts['id'],
                self.opts['saltenv'],
                pillarenv=self.opts.get('pillarenv'),
            )
            try:
                self.opts['pillar'] = yield async_pillar.compile_pillar()
                if notify:
                    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
                    evt.fire_event({'complete': True},
                                   tag=salt.defaults.events.MINION_PILLAR_COMPLETE)
            except SaltClientError:
                # Do not exit if a pillar refresh fails.
                log.error('Pillar data could not be refreshed. '
                          'One or more masters may be down!')
            finally:
                async_pillar.destroy()
        # Refresh everything that consumes pillar data.
        self.module_refresh(force_refresh, notify)
        self.matchers_refresh()
        self.beacons_refresh()

    def manage_schedule(self, tag, data):
        '''
        Apply a schedule-management action (add/delete/modify/...) from an event.
        '''
        func = data.get('func', None)
        name = data.get('name', None)
        schedule = data.get('schedule', None)
        where = data.get('where', None)
        persist = data.get('persist', None)

        # Map the requested action to (Schedule method name, args).
        funcs = {'delete': ('delete_job', (name, persist)),
                 'add': ('add_job', (schedule, persist)),
                 'modify': ('modify_job',
                            (name, schedule, persist)),
                 'enable': ('enable_schedule', ()),
                 'disable': ('disable_schedule', ()),
                 'enable_job': ('enable_job', (name, persist)),
                 'disable_job': ('disable_job', (name, persist)),
                 'postpone_job': ('postpone_job', (name, data)),
                 'skip_job': ('skip_job', (name, data)),
                 'reload': ('reload', (schedule,)),
                 'list': ('list', (where,)),
                 'save_schedule': ('save_schedule', ()),
                 'get_next_fire_time': ('get_next_fire_time',
                                        (name,))}

        # Call the appropriate schedule function
        try:
            # funcs.get(func) is None for an unknown action; unpacking it
            # raises TypeError, which is reported below.
            alias, params = funcs.get(func)
            getattr(self.schedule, alias)(*params)
        except TypeError:
            log.error('Function "%s" is unavailable in salt.utils.scheduler', func)

    def manage_beacons(self, tag, data):
        '''
        Manage Beacons
        '''
        func = data.get('func', None)
        name = data.get('name', None)
        beacon_data = data.get('beacon_data', None)
        include_pillar = data.get('include_pillar', None)
        include_opts = data.get('include_opts', None)

        # Map the requested action to (Beacon method name, args).
        funcs = {'add': ('add_beacon', (name, beacon_data)),
                 'modify': ('modify_beacon', (name, beacon_data)),
                 'delete': ('delete_beacon', (name,)),
                 'enable': ('enable_beacons', ()),
                 'disable': ('disable_beacons', ()),
                 'enable_beacon': ('enable_beacon', (name,)),
                 'disable_beacon': ('disable_beacon', (name,)),
                 'list': ('list_beacons', (include_opts, include_pillar)),
                 'list_available': ('list_available_beacons', ()),
                 'validate_beacon': ('validate_beacon', (name, beacon_data)),
                 'reset': ('reset', ())}

        # Call the appropriate beacon function
        try:
            alias, params = funcs.get(func)
            getattr(self.beacons, alias)(*params)
        except AttributeError:
            log.error('Function "%s" is unavailable in salt.beacons', func)
        except TypeError as exc:
            log.info(
                'Failed to handle %s with data(%s). Error: %s',
                tag, data, exc,
                exc_info_on_loglevel=logging.DEBUG
            )

    def environ_setenv(self, tag, data):
        '''
        Set the salt-minion main process environment according to
        the data contained in the minion event data
        '''
        environ = data.get('environ', None)
        if environ is None:
            return False
        false_unsets = data.get('false_unsets', False)
        clear_all = data.get('clear_all', False)
        # Imported lazily to avoid a circular import at module load time.
        import salt.modules.environ as mod_environ
        return mod_environ.setenv(environ, false_unsets, clear_all)

    def _pre_tune(self):
        '''
        Set the minion running flag and issue the appropriate warnings if
        the minion cannot be started or is already running
        '''
        if self._running is None:
            self._running = True
        elif self._running is False:
            log.error(
                'This %s was scheduled to stop. Not running %s.tune_in()',
                self.__class__.__name__, self.__class__.__name__
            )
            return
        elif self._running is True:
            log.error(
                'This %s is already running. Not running %s.tune_in()',
                self.__class__.__name__, self.__class__.__name__
            )
            return

        try:
            log.info(
                '%s is starting as user \'%s\'',
                self.__class__.__name__, salt.utils.user.get_user()
            )
        except Exception as err:
            # Only windows is allowed to fail here. See #3189. Log as debug in
            # that case. Else, error.
            log.log(
                salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR,
                'Failed to get the user who is starting %s',
                self.__class__.__name__,
                exc_info=err
            )

    def _mine_send(self, tag, data):
        '''
        Send mine data to the master
        '''
        channel = salt.transport.client.ReqChannel.factory(self.opts)
        data['tok'] = self.tok
        try:
            ret = channel.send(data)
            return ret
        except SaltReqTimeoutError:
            log.warning('Unable to send mine data to master.')
            return None
        finally:
            # Always release the request channel.
            channel.close()

    def _handle_tag_module_refresh(self, tag, data):
        '''
        Handle a module_refresh event
        '''
        self.module_refresh(
            force_refresh=data.get('force_refresh', False),
            notify=data.get('notify', False)
        )

    @tornado.gen.coroutine
    def _handle_tag_pillar_refresh(self, tag, data):
        '''
        Handle a pillar_refresh event
        '''
        yield self.pillar_refresh(
            force_refresh=data.get('force_refresh', False),
            notify=data.get('notify', False)
        )

    def _handle_tag_beacons_refresh(self, tag, data):
        '''
        Handle a beacon_refresh event
        '''
        self.beacons_refresh()

    def _handle_tag_matchers_refresh(self, tag, data):
        '''
        Handle a matchers_refresh event
        '''
        self.matchers_refresh()

    def _handle_tag_manage_schedule(self, tag, data):
        '''
        Handle a manage_schedule event
        '''
        self.manage_schedule(tag, data)

    def _handle_tag_manage_beacons(self, tag, data):
        '''
        Handle a manage_beacons event
        '''
        self.manage_beacons(tag, data)

    def _handle_tag_grains_refresh(self, tag, data):
        '''
        Handle a grains_refresh event
        '''
        # Only refresh when forced or when grains actually changed.
        if (data.get('force_refresh', False) or
                self.grains_cache != self.opts['grains']):
            self.pillar_refresh(force_refresh=True)
            self.grains_cache = self.opts['grains']

    def _handle_tag_environ_setenv(self, tag, data):
        '''
        Handle a environ_setenv event
        '''
        self.environ_setenv(tag, data)

    def _handle_tag_minion_mine(self, tag, data):
        '''
        Handle a _minion_mine event
        '''
        self._mine_send(tag, data)

    def _handle_tag_fire_master(self, tag, data):
        '''
        Handle a fire_master event
        '''
        if self.connected:
            log.debug('Forwarding master event tag=%s', data['tag'])
            self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])

    # NOTE(review): the body below uses `yield self.eval_master(...)`, which
    # makes this a generator; upstream decorates this handler with
    # @tornado.gen.coroutine — confirm the decorator was not lost here.
    def _handle_tag_master_disconnected_failback(self, tag, data):
        '''
        Handle a master_disconnected_failback event
        '''
        # if the master disconnect event is for a different master, raise an exception
        if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']:
            # not mine master, ignore
            return
        if tag.startswith(master_event(type='failback')):
            # if the master failback event is not for the top master, raise an exception
            if data['master'] != self.opts['master_list'][0]:
                raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format(
                    data['master'], self.opts['master']))
            # if the master failback event is for the current master, raise an exception
            # NOTE(review): comparing against self.opts['master'][0] — presumably
            # opts['master'] is a list here during failback; confirm this is not
            # indexing into a plain master string.
            elif data['master'] == self.opts['master'][0]:
                raise SaltException('Already connected to \'{0}\''.format(data['master']))

        if self.connected:
            # we are not connected anymore
            self.connected = False
            log.info('Connection to master %s lost', self.opts['master'])

            # we can't use the config default here because the default '0' value is overloaded
            # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up
            # these jobs
            master_alive_interval = self.opts['master_alive_interval'] or 60

            if self.opts['master_type'] != 'failover':
                # modify the scheduled job to fire on reconnect
                if self.opts['transport'] != 'tcp':
                    schedule = {
                        'function': 'status.master',
                        'seconds': master_alive_interval,
                        'jid_include': True,
                        'maxrunning': 1,
                        'return_job': False,
                        'kwargs': {'master': self.opts['master'],
                                   'connected': False}
                    }
                    self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
                                             schedule=schedule)
            else:
                # delete the scheduled job to don't interfere with the failover process
                if self.opts['transport'] != 'tcp':
                    self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True)

                log.info('Trying to tune in to next master from master-list')

                # Tear down the old pub channel before failing over.
                if hasattr(self, 'pub_channel'):
                    self.pub_channel.on_recv(None)
                    if hasattr(self.pub_channel, 'auth'):
                        self.pub_channel.auth.invalidate()
                    if hasattr(self.pub_channel, 'close'):
                        self.pub_channel.close()
                    del self.pub_channel

                # if eval_master finds a new master for us, self.connected
                # will be True again on successful master authentication
                try:
                    master, self.pub_channel = yield self.eval_master(
                        opts=self.opts,
                        failed=True,
                        failback=tag.startswith(master_event(type='failback')))
                except SaltClientError:
                    pass

                if self.connected:
                    self.opts['master'] = master

                    # re-init the subsystems to work with the new master
                    log.info(
                        'Re-initialising subsystems for new master %s',
                        self.opts['master']
                    )

                    # put the current schedule into the new loaders
                    self.opts['schedule'] = self.schedule.option('schedule')
                    self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
                    # make the schedule to use the new 'functions' loader
                    self.schedule.functions = self.functions
                    self.pub_channel.on_recv(self._handle_payload)
                    self._fire_master_minion_start()
                    log.info('Minion is ready to receive requests!')

                    # update scheduled job to run with the new master addr
                    if self.opts['transport'] != 'tcp':
                        schedule = {
                            'function': 'status.master',
                            'seconds': master_alive_interval,
                            'jid_include': True,
                            'maxrunning': 1,
                            'return_job': False,
                            'kwargs': {'master': self.opts['master'],
                                       'connected': True}
                        }
                        self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
                                                 schedule=schedule)

                        if self.opts['master_failback'] and 'master_list' in self.opts:
                            # Not on the top master: keep pinging it so we can fail back.
                            if self.opts['master'] != self.opts['master_list'][0]:
                                schedule = {
                                    'function': 'status.ping_master',
                                    'seconds': self.opts['master_failback_interval'],
                                    'jid_include': True,
                                    'maxrunning': 1,
                                    'return_job': False,
                                    'kwargs': {'master': self.opts['master_list'][0]}
                                }
                                self.schedule.modify_job(name=master_event(type='failback'),
                                                         schedule=schedule)
                            else:
                                self.schedule.delete_job(name=master_event(type='failback'), persist=True)
                else:
                    # No master could be reached: restart the minion.
                    self.restart = True
                    self.io_loop.stop()

    def _handle_tag_master_connected(self, tag, data):
        '''
        Handle a master_connected event
        '''
        # handle this event only once. otherwise it will pollute the log
        # also if master type is failover all the reconnection work is done
        # by `disconnected` event handler and this event must never happen,
        # anyway check it to be sure
        if not self.connected and self.opts['master_type'] != 'failover':
            log.info('Connection to master %s re-established', self.opts['master'])
            self.connected = True
            # modify the __master_alive job to only fire,
            # if the connection is lost again
            if self.opts['transport'] != 'tcp':
                if self.opts['master_alive_interval'] > 0:
                    schedule = {
                        'function': 'status.master',
                        'seconds': self.opts['master_alive_interval'],
                        'jid_include': True,
                        'maxrunning': 1,
                        'return_job': False,
                        'kwargs': {'master': self.opts['master'],
                                   'connected': True}
                    }
                    self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
                                             schedule=schedule)
                else:
                    self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True)

    def _handle_tag_schedule_return(self, tag, data):
        '''
        Handle a _schedule_return event
        '''
        # reporting current connection with master
        if data['schedule'].startswith(master_event(type='alive', master='')):
            if data['return']:
                log.debug(
                    'Connected to master %s',
                    data['schedule'].split(master_event(type='alive', master=''))[1]
                )
        self._return_pub(data, ret_cmd='_return', sync=False)

    def _handle_tag_salt_error(self, tag, data):
        '''
        Handle a _salt_error event
        '''
        if self.connected:
            log.debug('Forwarding salt error event tag=%s', tag)
            self._fire_master(data, tag)

    def _handle_tag_salt_auth_creds(self, tag, data):
        '''
        Handle a salt_auth_creds event
        '''
        key = tuple(data['key'])
        log.debug(
            'Updating auth data for %s: %s -> %s',
            key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds']
        )
        salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds']

    @tornado.gen.coroutine
    def handle_event(self, package):
        '''
        Handle an event from the epull_sock (all local minion events)
        '''
        if not self.ready:
            raise tornado.gen.Return()
        tag, data = salt.utils.event.SaltEvent.unpack(package)
        log.debug(
            'Minion of \'%s\' is handling event tag \'%s\'',
            self.opts['master'], tag
        )

        # Dispatch table: event-tag prefix -> handler method.
        tag_functions = {
            'beacons_refresh': self._handle_tag_beacons_refresh,
            'environ_setenv': self._handle_tag_environ_setenv,
            'fire_master': self._handle_tag_fire_master,
            'grains_refresh': self._handle_tag_grains_refresh,
            'matchers_refresh': self._handle_tag_matchers_refresh,
            'manage_schedule': self._handle_tag_manage_schedule,
            'manage_beacons': self._handle_tag_manage_beacons,
            '_minion_mine': self._handle_tag_minion_mine,
            'module_refresh': self._handle_tag_module_refresh,
            'pillar_refresh': self._handle_tag_pillar_refresh,
            'salt/auth/creds': self._handle_tag_salt_auth_creds,
            '_salt_error': self._handle_tag_salt_error,
            '__schedule_return': self._handle_tag_schedule_return,
            master_event(type='disconnected'): self._handle_tag_master_disconnected_failback,
            master_event(type='failback'): self._handle_tag_master_disconnected_failback,
            master_event(type='connected'): self._handle_tag_master_connected,
        }

        # Run the appropriate function
        for tag_function in tag_functions:
            if tag.startswith(tag_function):
                tag_functions[tag_function](tag, data)

    def _fallback_cleanups(self):
        '''
        Fallback cleanup routines, attempting to fix leaked processes, threads, etc.
        '''
        # Add an extra fallback in case a forked process leaks through
        multiprocessing.active_children()

        # Cleanup Windows threads
        if not salt.utils.platform.is_windows():
            return
        for thread in self.win_proc:
            if not thread.is_alive():
                thread.join()
                try:
                    self.win_proc.remove(thread)
                    del thread
                except (ValueError, NameError):
                    pass

    def _setup_core(self):
        '''
        Set up the core minion attributes.
        This is safe to call multiple times.
        '''
        if not self.ready:
            # First call. Initialize.
            self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
            self.serial = salt.payload.Serial(self.opts)
            self.mod_opts = self._prep_mod_opts()
            # self.matcher = Matcher(self.opts, self.functions)
            self.matchers = salt.loader.matchers(self.opts)
            self.beacons = salt.beacons.Beacon(self.opts, self.functions)
            uid = salt.utils.user.get_uid(user=self.opts.get('user', None))
            self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
            self.grains_cache = self.opts['grains']
            self.ready = True

    def setup_beacons(self, before_connect=False):
        '''
        Set up the beacons.
        This is safe to call multiple times.
        '''
        self._setup_core()

        loop_interval = self.opts['loop_interval']
        new_periodic_callbacks = {}

        if 'beacons' not in self.periodic_callbacks:
            self.beacons = salt.beacons.Beacon(self.opts, self.functions)

            def handle_beacons():
                # Process Beacons
                beacons = None
                try:
                    beacons = self.process_beacons(self.functions)
                except Exception:
                    log.critical('The beacon errored: ', exc_info=True)
                if beacons and self.connected:
                    self._fire_master(events=beacons)

            new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(
                handle_beacons, loop_interval * 1000)
            if before_connect:
                # Make sure there is a chance for one iteration to occur before connect
                handle_beacons()

        if 'cleanup' not in self.periodic_callbacks:
            new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(
                self._fallback_cleanups, loop_interval * 1000)

        # start all the other callbacks
        for periodic_cb in six.itervalues(new_periodic_callbacks):
            periodic_cb.start()

        self.periodic_callbacks.update(new_periodic_callbacks)

    def setup_scheduler(self, before_connect=False):
        '''
        Set up the scheduler.
        This is safe to call multiple times.
        '''
        self._setup_core()

        loop_interval = self.opts['loop_interval']
        new_periodic_callbacks = {}

        if 'schedule' not in self.periodic_callbacks:
            if 'schedule' not in self.opts:
                self.opts['schedule'] = {}
            if not hasattr(self, 'schedule'):
                self.schedule = salt.utils.schedule.Schedule(
                    self.opts,
                    self.functions,
                    self.returners,
                    utils=self.utils,
                    cleanup=[master_event(type='alive')])

            try:
                if self.opts['grains_refresh_every']:  # In minutes, not seconds!
                    log.debug(
                        'Enabling the grains refresher. Will run every %d minute(s).',
                        self.opts['grains_refresh_every']
                    )
                    self._refresh_grains_watcher(abs(self.opts['grains_refresh_every']))
            except Exception as exc:
                log.error(
                    'Exception occurred in attempt to initialize grain refresh '
                    'routine during minion tune-in: %s', exc
                )

            # TODO: actually listen to the return and change period
            def handle_schedule():
                self.process_schedule(self, loop_interval)
            new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000)

            if before_connect:
                # Make sure there is a chance for one iteration to occur before connect
                handle_schedule()

        if 'cleanup' not in self.periodic_callbacks:
            new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(
                self._fallback_cleanups, loop_interval * 1000)

        # start all the other callbacks
        for periodic_cb in six.itervalues(new_periodic_callbacks):
            periodic_cb.start()

        self.periodic_callbacks.update(new_periodic_callbacks)

    # Main Minion Tune In
    def tune_in(self, start=True):
        '''
        Lock onto the publisher.
        This is the main event loop for the minion
        :rtype : None
        '''
        self._pre_tune()

        log.debug('Minion \'%s\' trying to tune in', self.opts['id'])

        if start:
            if self.opts.get('beacons_before_connect', False):
                self.setup_beacons(before_connect=True)
            if self.opts.get('scheduler_before_connect', False):
                self.setup_scheduler(before_connect=True)
            self.sync_connect_master()
        if self.connected:
            self._fire_master_minion_start()
            log.info('Minion is ready to receive requests!')

        # Make sure to gracefully handle SIGUSR1
        enable_sigusr1_handler()

        # Make sure to gracefully handle CTRL_LOGOFF_EVENT
        if HAS_WIN_FUNCTIONS:
            salt.utils.win_functions.enable_ctrl_logoff_handler()

        # On first startup execute a state run if configured to do so
        self._state_run()

        self.setup_beacons()
        self.setup_scheduler()

        # schedule the stuff that runs every interval
        ping_interval = self.opts.get('ping_interval', 0) * 60
        if ping_interval > 0 and self.connected:
            def ping_master():
                try:
                    def ping_timeout_handler(*_):
                        # auth_safemode: restart the minion service if the
                        # master does not answer the ping.
                        if self.opts.get('auth_safemode', False):
                            log.error('** Master Ping failed. Attempting to restart minion**')
                            delay = self.opts.get('random_reauth_delay', 5)
                            log.info('delaying random_reauth_delay %ss', delay)
                            try:
                                self.functions['service.restart'](service_name())
                            except KeyError:
                                # Probably no init system (running in docker?)
                                log.warning(
                                    'ping_interval reached without response '
                                    'from the master, but service.restart '
                                    'could not be run to restart the minion '
                                    'daemon. ping_interval requires that the '
                                    'minion is running under an init system.'
                                )

                    self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler)
                except Exception:
                    # NOTE(review): 'exc_on_loglevel' looks like a typo for
                    # 'exc_info_on_loglevel' — confirm against the logging
                    # wrapper this file uses.
                    log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG)
            self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000)
            self.periodic_callbacks['ping'].start()

        # add handler to subscriber
        if hasattr(self, 'pub_channel') and self.pub_channel is not None:
            self.pub_channel.on_recv(self._handle_payload)
        elif self.opts.get('master_type') != 'disable':
            log.error('No connection to master found. Scheduled jobs will not run.')

        if start:
            try:
                self.io_loop.start()
                if self.restart:
                    self.destroy()
            except (KeyboardInterrupt, RuntimeError):  # A RuntimeError can be re-raised by Tornado on shutdown
                self.destroy()

    def _handle_payload(self, payload):
        '''
        Handle an AES-encrypted publication from the master, running it only
        if this minion matches the publication's target.
        '''
        if payload is not None and payload['enc'] == 'aes':
            if self._target_load(payload['load']):
                self._handle_decoded_payload(payload['load'])
            elif self.opts['zmq_filtering']:
                # In the filtering enabled case, we'd like to know when minion sees something it shouldnt
                log.trace(
                    'Broadcast message received not for this minion, Load: %s',
                    payload['load']
                )
        # If it's not AES, and thus has not been verified, we do nothing.
        # In the future, we could add support for some clearfuncs, but
        # the minion currently has no need.

    def _target_load(self, load):
        '''
        Return True if this minion matches the publication's target spec.
        '''
        # Verify that the publication is valid
        if 'tgt' not in load or 'jid' not in load or 'fun' not in load \
           or 'arg' not in load:
            return False
        # Verify that the publication applies to this minion

        # It's important to note that the master does some pre-processing
        # to determine which minions to send a request to. So for example,
        # a "salt -G 'grain_key:grain_val' test.ping" will invoke some
        # pre-processing on the master and this minion should not see the
        # publication if the master does not determine that it should.
        if 'tgt_type' in load:
            match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None)
            if match_func is None:
                return False
            if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
                delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM)
                if not match_func(load['tgt'], delimiter=delimiter):
                    return False
            elif not match_func(load['tgt']):
                return False
        else:
            # No tgt_type: default to glob matching.
            if not self.matchers['glob_match.match'](load['tgt']):
                return False

        return True

    def destroy(self):
        '''
        Tear down the minion
        '''
        if self._running is False:
            return
        self._running = False
        if hasattr(self, 'schedule'):
            del self.schedule
        if hasattr(self, 'pub_channel') and self.pub_channel is not None:
            self.pub_channel.on_recv(None)
            if hasattr(self.pub_channel, 'close'):
                self.pub_channel.close()
            del self.pub_channel
        if hasattr(self, 'periodic_callbacks'):
            for cb in six.itervalues(self.periodic_callbacks):
                cb.stop()

    def __del__(self):
        self.destroy()
saltstack/salt
salt/minion.py
Minion._fire_master
python
def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. ''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return if sync: try: self._send_req_sync(load, timeout) except salt.exceptions.SaltReqTimeoutError: log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return False except Exception: log.info('fire_master failed: %s', traceback.format_exc()) return False else: if timeout_handler is None: def handle_timeout(*_): log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return True timeout_handler = handle_timeout with tornado.stack_context.ExceptionStackContext(timeout_handler): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True
Fire an event on the master, or drop message if unable to send.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L1451-L1492
null
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, 
self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' 
mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! 
if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) executors = salt.loader.executors(opts, functions, proxy=proxy) if opt_in: self.opts = opts return functions, returners, errors, executors def _send_req_sync(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.ReqChannel.factory(self.opts) try: return channel.send(load, timeout=timeout) finally: channel.close() @tornado.gen.coroutine def _send_req_async(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.AsyncReqChannel.factory(self.opts) try: ret = yield channel.send(load, timeout=timeout) raise tornado.gen.Return(ret) finally: channel.close() @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # Ensure payload is unicode. Disregard failure to decode binary blobs. 
if six.PY2: data = salt.utils.data.decode(data, keep=True) if 'user' in data: log.info( 'User %s Executing command %s with jid %s', data['user'], data['fun'], data['jid'] ) else: log.info( 'Executing command %s with jid %s', data['fun'], data['jid'] ) log.debug('Command details %s', data) # Don't duplicate jobs log.trace('Started JIDs: %s', self.jid_queue) if self.jid_queue is not None: if data['jid'] in self.jid_queue: return else: self.jid_queue.append(data['jid']) if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']: self.jid_queue.pop(0) if isinstance(data['fun'], six.string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners process_count_max = self.opts.get('process_count_max') process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs') if process_count_max > 0: process_count = len(salt.utils.minion.running(self.opts)) while process_count >= process_count_max: log.warning('Maximum number of processes (%s) reached while ' 'executing jid %s, waiting %s seconds...', process_count_max, data['jid'], process_count_max_sleep_secs) yield tornado.gen.sleep(process_count_max_sleep_secs) process_count = len(salt.utils.minion.running(self.opts)) # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. 
instance = self multiprocessing_enabled = self.opts.get('multiprocessing', True) if multiprocessing_enabled: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None with default_signals(signal.SIGINT, signal.SIGTERM): process = SignalHandlingMultiprocessingProcess( target=self._target, args=(instance, self.opts, data, self.connected) ) else: process = threading.Thread( target=self._target, args=(instance, self.opts, data, self.connected), name=data['jid'] ) if multiprocessing_enabled: with default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers process.start() else: process.start() # TODO: remove the windows specific check? if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not 
hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or 
allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
try: func_result = all(return_data.get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret['retcode'] = retcode ret['success'] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = 'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( 'A command in \'%s\' had a problem: %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( 'Problem executing \'%s\': %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing \'{0}\': {1}'.format( function_name, exc ) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format( function_name, exc, func.__doc__ or '' ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string(function_name) ret['return'] = docs else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = 
function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: \'{0}\''.format( minion_instance.function_errors[mod_name] ) ret['success'] = False ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. Ignoring.') if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) # Add default returners from minion config # Should have been coverted to comma-delimited string already if isinstance(opts.get('return'), six.string_types): if data['ret']: data['ret'] = ','.join((data['ret'], opts['return'])) else: data['ret'] = opts['return'] log.debug('minion return: %s', ret) # TODO: make a list? Seems odd to split it this late :/ if data['ret'] and isinstance(data['ret'], six.string_types): if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: returner_str = '{0}.returner'.format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string(returner_str) log.error( 'Returner %s could not be loaded: %s', returner_str, returner_err ) except Exception as exc: log.exception( 'The return failed for job %s: %s', data['jid'], exc ) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. 
''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID %s', sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) multifunc_ordered = opts.get('multifunc_ordered', False) num_funcs = len(data['fun']) if multifunc_ordered: ret = { 'return': [None] * num_funcs, 'retcode': [None] * num_funcs, 'success': [False] * num_funcs } else: ret = { 'return': {}, 'retcode': {}, 'success': {} } for ind in range(0, num_funcs): if not multifunc_ordered: ret['success'][data['fun'][ind]] = False try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True elif minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. 
Only ' 'saltutil.refresh_pillar allowed in blackout mode.') func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) minion_instance.functions.pack['__context__']['retcode'] = 0 key = ind if multifunc_ordered else data['fun'][ind] ret['return'][key] = func(*args, **kwargs) retcode = minion_instance.functions.pack['__context__'].get( 'retcode', 0 ) if retcode == 0: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. try: func_result = all(ret['return'][key].get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = 1 ret['retcode'][key] = retcode ret['success'][key] = retcode == 0 except Exception as exc: trb = traceback.format_exc() log.warning('The minion function caused an exception: %s', exc) if multifunc_ordered: ret['return'][ind] = trb else: ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job %s: %s', data['jid'], exc ) def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file 
is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled if ret['jid'] == 'req': ret['jid'] = salt.utils.jid.gen_jid(self.opts) salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) if not self.opts['pub_ret']: return '' def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. 
If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' if not isinstance(rets, list): rets = [rets] jids = {} for ret in rets: jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) load = jids.setdefault(jid, {}) if ret_cmd == '_syndic_return': if not load: load.update({'id': self.opts['id'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__'), 'return': {}}) if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load.update({'id': self.opts['id']}) for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. 
This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled salt.utils.minion.cache_jobs(self.opts, load['jid'], ret) load = {'cmd': ret_cmd, 'load': list(six.itervalues(jids))} def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' 
) else: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data) def _refresh_grains_watcher(self, refresh_interval_in_minutes): ''' Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: :return: None ''' if '__update_grains' not in self.opts.get('schedule', {}): if 'schedule' not in self.opts: self.opts['schedule'] = {} self.opts['schedule'].update({ '__update_grains': { 'function': 'event.fire', 'args': [{}, 'grains_refresh'], 'minutes': refresh_interval_in_minutes } }) def _fire_master_minion_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to False in Sodium release. self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'minion_start' ) # send name spaced event self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'minion'), ) def module_refresh(self, force_refresh=False, notify=False): ''' Refresh the functions and returners. ''' log.debug('Refreshing modules. Notify=%s', notify) self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify) self.schedule.functions = self.functions self.schedule.returners = self.returners def beacons_refresh(self): ''' Refresh the functions and returners. 
''' log.debug('Refreshing beacons.') self.beacons = salt.beacons.Beacon(self.opts, self.functions) def matchers_refresh(self): ''' Refresh the matchers ''' log.debug('Refreshing matchers.') self.matchers = salt.loader.matchers(self.opts) # TODO: only allow one future in flight at a time? @tornado.gen.coroutine def pillar_refresh(self, force_refresh=False, notify=False): ''' Refresh the pillar ''' if self.connected: log.debug('Refreshing pillar. Notify: %s', notify) async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ) try: self.opts['pillar'] = yield async_pillar.compile_pillar() if notify: evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE) except SaltClientError: # Do not exit if a pillar refresh fails. log.error('Pillar data could not be refreshed. ' 'One or more masters may be down!') finally: async_pillar.destroy() self.module_refresh(force_refresh, notify) self.matchers_refresh() self.beacons_refresh() def manage_schedule(self, tag, data): ''' Refresh the functions and returners. 
''' func = data.get('func', None) name = data.get('name', None) schedule = data.get('schedule', None) where = data.get('where', None) persist = data.get('persist', None) funcs = {'delete': ('delete_job', (name, persist)), 'add': ('add_job', (schedule, persist)), 'modify': ('modify_job', (name, schedule, persist)), 'enable': ('enable_schedule', ()), 'disable': ('disable_schedule', ()), 'enable_job': ('enable_job', (name, persist)), 'disable_job': ('disable_job', (name, persist)), 'postpone_job': ('postpone_job', (name, data)), 'skip_job': ('skip_job', (name, data)), 'reload': ('reload', (schedule,)), 'list': ('list', (where,)), 'save_schedule': ('save_schedule', ()), 'get_next_fire_time': ('get_next_fire_time', (name,))} # Call the appropriate schedule function try: alias, params = funcs.get(func) getattr(self.schedule, alias)(*params) except TypeError: log.error('Function "%s" is unavailable in salt.utils.scheduler', func) def manage_beacons(self, tag, data): ''' Manage Beacons ''' func = data.get('func', None) name = data.get('name', None) beacon_data = data.get('beacon_data', None) include_pillar = data.get('include_pillar', None) include_opts = data.get('include_opts', None) funcs = {'add': ('add_beacon', (name, beacon_data)), 'modify': ('modify_beacon', (name, beacon_data)), 'delete': ('delete_beacon', (name,)), 'enable': ('enable_beacons', ()), 'disable': ('disable_beacons', ()), 'enable_beacon': ('enable_beacon', (name,)), 'disable_beacon': ('disable_beacon', (name,)), 'list': ('list_beacons', (include_opts, include_pillar)), 'list_available': ('list_available_beacons', ()), 'validate_beacon': ('validate_beacon', (name, beacon_data)), 'reset': ('reset', ())} # Call the appropriate beacon function try: alias, params = funcs.get(func) getattr(self.beacons, alias)(*params) except AttributeError: log.error('Function "%s" is unavailable in salt.beacons', func) except TypeError as exc: log.info( 'Failed to handle %s with data(%s). 
Error: %s', tag, data, exc, exc_info_on_loglevel=logging.DEBUG ) def environ_setenv(self, tag, data): ''' Set the salt-minion main process environment according to the data contained in the minion event data ''' environ = data.get('environ', None) if environ is None: return False false_unsets = data.get('false_unsets', False) clear_all = data.get('clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all) def _pre_tune(self): ''' Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running ''' if self._running is None: self._running = True elif self._running is False: log.error( 'This %s was scheduled to stop. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return elif self._running is True: log.error( 'This %s is already running. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return try: log.info( '%s is starting as user \'%s\'', self.__class__.__name__, salt.utils.user.get_user() ) except Exception as err: # Only windows is allowed to fail here. See #3189. Log as debug in # that case. Else, error. 
log.log( salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR, 'Failed to get the user who is starting %s', self.__class__.__name__, exc_info=err ) def _mine_send(self, tag, data): ''' Send mine data to the master ''' channel = salt.transport.client.ReqChannel.factory(self.opts) data['tok'] = self.tok try: ret = channel.send(data) return ret except SaltReqTimeoutError: log.warning('Unable to send mine data to master.') return None finally: channel.close() def _handle_tag_module_refresh(self, tag, data): ''' Handle a module_refresh event ''' self.module_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) @tornado.gen.coroutine def _handle_tag_pillar_refresh(self, tag, data): ''' Handle a pillar_refresh event ''' yield self.pillar_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) def _handle_tag_beacons_refresh(self, tag, data): ''' Handle a beacon_refresh event ''' self.beacons_refresh() def _handle_tag_matchers_refresh(self, tag, data): ''' Handle a matchers_refresh event ''' self.matchers_refresh() def _handle_tag_manage_schedule(self, tag, data): ''' Handle a manage_schedule event ''' self.manage_schedule(tag, data) def _handle_tag_manage_beacons(self, tag, data): ''' Handle a manage_beacons event ''' self.manage_beacons(tag, data) def _handle_tag_grains_refresh(self, tag, data): ''' Handle a grains_refresh event ''' if (data.get('force_refresh', False) or self.grains_cache != self.opts['grains']): self.pillar_refresh(force_refresh=True) self.grains_cache = self.opts['grains'] def _handle_tag_environ_setenv(self, tag, data): ''' Handle a environ_setenv event ''' self.environ_setenv(tag, data) def _handle_tag_minion_mine(self, tag, data): ''' Handle a _minion_mine event ''' self._mine_send(tag, data) def _handle_tag_fire_master(self, tag, data): ''' Handle a fire_master event ''' if self.connected: log.debug('Forwarding master event tag=%s', data['tag']) 
self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) def _handle_tag_master_disconnected_failback(self, tag, data): ''' Handle a master_disconnected_failback event ''' # if the master disconnect event is for a different master, raise an exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 
'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = 
True self.io_loop.stop() def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) def _handle_tag_salt_auth_creds(self, tag, data): ''' Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def handle_event(self, package): 
''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. ''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion.ctx
python
def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack
Return a single context manager for the minion's data
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L1581-L1596
null
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, 
self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' 
mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! 
if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) executors = salt.loader.executors(opts, functions, proxy=proxy) if opt_in: self.opts = opts return functions, returners, errors, executors def _send_req_sync(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.ReqChannel.factory(self.opts) try: return channel.send(load, timeout=timeout) finally: channel.close() @tornado.gen.coroutine def _send_req_async(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.AsyncReqChannel.factory(self.opts) try: ret = yield channel.send(load, timeout=timeout) raise tornado.gen.Return(ret) finally: channel.close() def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. ''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return if sync: try: self._send_req_sync(load, timeout) except salt.exceptions.SaltReqTimeoutError: log.info('fire_master failed: master could not be contacted. 
Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return False except Exception: log.info('fire_master failed: %s', traceback.format_exc()) return False else: if timeout_handler is None: def handle_timeout(*_): log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return True timeout_handler = handle_timeout with tornado.stack_context.ExceptionStackContext(timeout_handler): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # Ensure payload is unicode. Disregard failure to decode binary blobs. if six.PY2: data = salt.utils.data.decode(data, keep=True) if 'user' in data: log.info( 'User %s Executing command %s with jid %s', data['user'], data['fun'], data['jid'] ) else: log.info( 'Executing command %s with jid %s', data['fun'], data['jid'] ) log.debug('Command details %s', data) # Don't duplicate jobs log.trace('Started JIDs: %s', self.jid_queue) if self.jid_queue is not None: if data['jid'] in self.jid_queue: return else: self.jid_queue.append(data['jid']) if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']: self.jid_queue.pop(0) if isinstance(data['fun'], six.string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners process_count_max = self.opts.get('process_count_max') process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs') if process_count_max > 0: process_count = len(salt.utils.minion.running(self.opts)) while 
process_count >= process_count_max: log.warning('Maximum number of processes (%s) reached while ' 'executing jid %s, waiting %s seconds...', process_count_max, data['jid'], process_count_max_sleep_secs) yield tornado.gen.sleep(process_count_max_sleep_secs) process_count = len(salt.utils.minion.running(self.opts)) # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. instance = self multiprocessing_enabled = self.opts.get('multiprocessing', True) if multiprocessing_enabled: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None with default_signals(signal.SIGINT, signal.SIGTERM): process = SignalHandlingMultiprocessingProcess( target=self._target, args=(instance, self.opts, data, self.connected) ) else: process = threading.Thread( target=self._target, args=(instance, self.opts, data, self.connected), name=data['jid'] ) if multiprocessing_enabled: with default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers process.start() else: process.start() # TODO: remove the windows specific check? 
if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. 
''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. 
Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
try: func_result = all(return_data.get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret['retcode'] = retcode ret['success'] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = 'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( 'A command in \'%s\' had a problem: %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( 'Problem executing \'%s\': %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing \'{0}\': {1}'.format( function_name, exc ) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format( function_name, exc, func.__doc__ or '' ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string(function_name) ret['return'] = docs else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = 
function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: \'{0}\''.format( minion_instance.function_errors[mod_name] ) ret['success'] = False ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. Ignoring.') if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) # Add default returners from minion config # Should have been coverted to comma-delimited string already if isinstance(opts.get('return'), six.string_types): if data['ret']: data['ret'] = ','.join((data['ret'], opts['return'])) else: data['ret'] = opts['return'] log.debug('minion return: %s', ret) # TODO: make a list? Seems odd to split it this late :/ if data['ret'] and isinstance(data['ret'], six.string_types): if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: returner_str = '{0}.returner'.format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string(returner_str) log.error( 'Returner %s could not be loaded: %s', returner_str, returner_err ) except Exception as exc: log.exception( 'The return failed for job %s: %s', data['jid'], exc ) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. 
''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID %s', sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) multifunc_ordered = opts.get('multifunc_ordered', False) num_funcs = len(data['fun']) if multifunc_ordered: ret = { 'return': [None] * num_funcs, 'retcode': [None] * num_funcs, 'success': [False] * num_funcs } else: ret = { 'return': {}, 'retcode': {}, 'success': {} } for ind in range(0, num_funcs): if not multifunc_ordered: ret['success'][data['fun'][ind]] = False try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True elif minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. 
Only ' 'saltutil.refresh_pillar allowed in blackout mode.') func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) minion_instance.functions.pack['__context__']['retcode'] = 0 key = ind if multifunc_ordered else data['fun'][ind] ret['return'][key] = func(*args, **kwargs) retcode = minion_instance.functions.pack['__context__'].get( 'retcode', 0 ) if retcode == 0: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. try: func_result = all(ret['return'][key].get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = 1 ret['retcode'][key] = retcode ret['success'][key] = retcode == 0 except Exception as exc: trb = traceback.format_exc() log.warning('The minion function caused an exception: %s', exc) if multifunc_ordered: ret['return'][ind] = trb else: ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job %s: %s', data['jid'], exc ) def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file 
is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled if ret['jid'] == 'req': ret['jid'] = salt.utils.jid.gen_jid(self.opts) salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) if not self.opts['pub_ret']: return '' def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. 
If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' if not isinstance(rets, list): rets = [rets] jids = {} for ret in rets: jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) load = jids.setdefault(jid, {}) if ret_cmd == '_syndic_return': if not load: load.update({'id': self.opts['id'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__'), 'return': {}}) if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load.update({'id': self.opts['id']}) for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. 
This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled salt.utils.minion.cache_jobs(self.opts, load['jid'], ret) load = {'cmd': ret_cmd, 'load': list(six.itervalues(jids))} def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' 
) else: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data) def _refresh_grains_watcher(self, refresh_interval_in_minutes): ''' Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: :return: None ''' if '__update_grains' not in self.opts.get('schedule', {}): if 'schedule' not in self.opts: self.opts['schedule'] = {} self.opts['schedule'].update({ '__update_grains': { 'function': 'event.fire', 'args': [{}, 'grains_refresh'], 'minutes': refresh_interval_in_minutes } }) def _fire_master_minion_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to False in Sodium release. self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'minion_start' ) # send name spaced event self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'minion'), ) def module_refresh(self, force_refresh=False, notify=False): ''' Refresh the functions and returners. ''' log.debug('Refreshing modules. Notify=%s', notify) self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify) self.schedule.functions = self.functions self.schedule.returners = self.returners def beacons_refresh(self): ''' Refresh the functions and returners. 
''' log.debug('Refreshing beacons.') self.beacons = salt.beacons.Beacon(self.opts, self.functions) def matchers_refresh(self): ''' Refresh the matchers ''' log.debug('Refreshing matchers.') self.matchers = salt.loader.matchers(self.opts) # TODO: only allow one future in flight at a time? @tornado.gen.coroutine def pillar_refresh(self, force_refresh=False, notify=False): ''' Refresh the pillar ''' if self.connected: log.debug('Refreshing pillar. Notify: %s', notify) async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ) try: self.opts['pillar'] = yield async_pillar.compile_pillar() if notify: evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE) except SaltClientError: # Do not exit if a pillar refresh fails. log.error('Pillar data could not be refreshed. ' 'One or more masters may be down!') finally: async_pillar.destroy() self.module_refresh(force_refresh, notify) self.matchers_refresh() self.beacons_refresh() def manage_schedule(self, tag, data): ''' Refresh the functions and returners. 
''' func = data.get('func', None) name = data.get('name', None) schedule = data.get('schedule', None) where = data.get('where', None) persist = data.get('persist', None) funcs = {'delete': ('delete_job', (name, persist)), 'add': ('add_job', (schedule, persist)), 'modify': ('modify_job', (name, schedule, persist)), 'enable': ('enable_schedule', ()), 'disable': ('disable_schedule', ()), 'enable_job': ('enable_job', (name, persist)), 'disable_job': ('disable_job', (name, persist)), 'postpone_job': ('postpone_job', (name, data)), 'skip_job': ('skip_job', (name, data)), 'reload': ('reload', (schedule,)), 'list': ('list', (where,)), 'save_schedule': ('save_schedule', ()), 'get_next_fire_time': ('get_next_fire_time', (name,))} # Call the appropriate schedule function try: alias, params = funcs.get(func) getattr(self.schedule, alias)(*params) except TypeError: log.error('Function "%s" is unavailable in salt.utils.scheduler', func) def manage_beacons(self, tag, data): ''' Manage Beacons ''' func = data.get('func', None) name = data.get('name', None) beacon_data = data.get('beacon_data', None) include_pillar = data.get('include_pillar', None) include_opts = data.get('include_opts', None) funcs = {'add': ('add_beacon', (name, beacon_data)), 'modify': ('modify_beacon', (name, beacon_data)), 'delete': ('delete_beacon', (name,)), 'enable': ('enable_beacons', ()), 'disable': ('disable_beacons', ()), 'enable_beacon': ('enable_beacon', (name,)), 'disable_beacon': ('disable_beacon', (name,)), 'list': ('list_beacons', (include_opts, include_pillar)), 'list_available': ('list_available_beacons', ()), 'validate_beacon': ('validate_beacon', (name, beacon_data)), 'reset': ('reset', ())} # Call the appropriate beacon function try: alias, params = funcs.get(func) getattr(self.beacons, alias)(*params) except AttributeError: log.error('Function "%s" is unavailable in salt.beacons', func) except TypeError as exc: log.info( 'Failed to handle %s with data(%s). 
Error: %s', tag, data, exc, exc_info_on_loglevel=logging.DEBUG ) def environ_setenv(self, tag, data): ''' Set the salt-minion main process environment according to the data contained in the minion event data ''' environ = data.get('environ', None) if environ is None: return False false_unsets = data.get('false_unsets', False) clear_all = data.get('clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all) def _pre_tune(self): ''' Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running ''' if self._running is None: self._running = True elif self._running is False: log.error( 'This %s was scheduled to stop. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return elif self._running is True: log.error( 'This %s is already running. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return try: log.info( '%s is starting as user \'%s\'', self.__class__.__name__, salt.utils.user.get_user() ) except Exception as err: # Only windows is allowed to fail here. See #3189. Log as debug in # that case. Else, error. 
log.log( salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR, 'Failed to get the user who is starting %s', self.__class__.__name__, exc_info=err ) def _mine_send(self, tag, data): ''' Send mine data to the master ''' channel = salt.transport.client.ReqChannel.factory(self.opts) data['tok'] = self.tok try: ret = channel.send(data) return ret except SaltReqTimeoutError: log.warning('Unable to send mine data to master.') return None finally: channel.close() def _handle_tag_module_refresh(self, tag, data): ''' Handle a module_refresh event ''' self.module_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) @tornado.gen.coroutine def _handle_tag_pillar_refresh(self, tag, data): ''' Handle a pillar_refresh event ''' yield self.pillar_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) def _handle_tag_beacons_refresh(self, tag, data): ''' Handle a beacon_refresh event ''' self.beacons_refresh() def _handle_tag_matchers_refresh(self, tag, data): ''' Handle a matchers_refresh event ''' self.matchers_refresh() def _handle_tag_manage_schedule(self, tag, data): ''' Handle a manage_schedule event ''' self.manage_schedule(tag, data) def _handle_tag_manage_beacons(self, tag, data): ''' Handle a manage_beacons event ''' self.manage_beacons(tag, data) def _handle_tag_grains_refresh(self, tag, data): ''' Handle a grains_refresh event ''' if (data.get('force_refresh', False) or self.grains_cache != self.opts['grains']): self.pillar_refresh(force_refresh=True) self.grains_cache = self.opts['grains'] def _handle_tag_environ_setenv(self, tag, data): ''' Handle a environ_setenv event ''' self.environ_setenv(tag, data) def _handle_tag_minion_mine(self, tag, data): ''' Handle a _minion_mine event ''' self._mine_send(tag, data) def _handle_tag_fire_master(self, tag, data): ''' Handle a fire_master event ''' if self.connected: log.debug('Forwarding master event tag=%s', data['tag']) 
self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) def _handle_tag_master_disconnected_failback(self, tag, data): ''' Handle a master_disconnected_failback event ''' # if the master disconnect event is for a different master, raise an exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 
'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = 
True self.io_loop.stop() def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) def _handle_tag_salt_auth_creds(self, tag, data): ''' Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def handle_event(self, package): 
''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. ''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion._thread_return
python
def _thread_return(cls, minion_instance, opts, data):
    '''
    Threading/multiprocessing target that performs the actual minion-side
    execution of a published job and publishes the result back to the
    master (and to any configured returners).

    :param cls: the Minion class (used only for the process title)
    :param minion_instance: live minion object whose loaders
        (``functions``, ``executors``, ``returners``) run the job
    :param dict opts: the minion configuration
    :param dict data: decoded job payload; must contain at least
        ``jid``, ``fun``, ``arg`` and ``ret``
    '''
    # Record this job in the proc dir so running jobs can be introspected.
    fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
    if opts['multiprocessing'] and not salt.utils.platform.is_windows():
        # Shutdown the multiprocessing before daemonizing
        salt.log.setup.shutdown_multiprocessing_logging()

        salt.utils.process.daemonize_if(opts)

        # Reconfigure multiprocessing logging after daemonizing
        salt.log.setup.setup_multiprocessing_logging()

    salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid']))

    sdata = {'pid': os.getpid()}
    sdata.update(data)
    log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid'])
    with salt.utils.files.fopen(fn_, 'w+b') as fp_:
        fp_.write(minion_instance.serial.dumps(sdata))

    ret = {'success': False}
    function_name = data['fun']
    executors = data.get('module_executors') or \
        getattr(minion_instance, 'module_executors', []) or \
        opts.get('module_executors', ['direct_call'])
    # Ask each configured executor whether it is willing to run a function
    # that is missing from the loaded execution modules.
    # BUGFIX: the membership test previously used the unformatted literal
    # '{0}.allow_missing_func', which can never be a key in the executors
    # loader, so allow_missing_funcs was always False. The executor name
    # must be substituted into the key before the lookup.
    allow_missing_funcs = any(
        minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name)
        for executor in executors
        if '{0}.allow_missing_func'.format(executor) in minion_instance.executors
    )
    if function_name in minion_instance.functions or allow_missing_funcs is True:
        try:
            minion_blackout_violation = False
            # Blackout mode: only saltutil.refresh_pillar and explicitly
            # whitelisted functions may run. The flag and whitelist can
            # come from pillar and/or grains.
            if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
                whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
                # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
                    minion_blackout_violation = True
            # use minion_blackout_whitelist from grains if it exists
            if minion_instance.opts['grains'].get('minion_blackout', False):
                whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
                if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
                    minion_blackout_violation = True
            if minion_blackout_violation:
                raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                          'to False in pillar or grains to resume operations. Only '
                                          'saltutil.refresh_pillar allowed in blackout mode.')

            if function_name in minion_instance.functions:
                func = minion_instance.functions[function_name]
                args, kwargs = load_args_and_kwargs(
                    func,
                    data['arg'],
                    data)
            else:
                # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True
                func = function_name
                args, kwargs = data['arg'], data
            minion_instance.functions.pack['__context__']['retcode'] = 0
            if isinstance(executors, six.string_types):
                executors = [executors]
            elif not isinstance(executors, list) or not executors:
                raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected".
                                          format(executors))
            if opts.get('sudo_user', '') and executors[-1] != 'sudo':
                executors[-1] = 'sudo'  # replace the last one with sudo
            log.trace('Executors list %s', executors)  # pylint: disable=no-member

            # Run the job through the executor chain; the first executor
            # that produces a non-None result wins.
            for name in executors:
                fname = '{0}.execute'.format(name)
                if fname not in minion_instance.executors:
                    raise SaltInvocationError("Executor '{0}' is not available".format(name))
                return_data = minion_instance.executors[fname](opts, data, func, args, kwargs)
                if return_data is not None:
                    break

            if isinstance(return_data, types.GeneratorType):
                # Stream each yielded chunk to the master as a job progress
                # event while accumulating the final return value. Dicts are
                # merged; anything else switches accumulation to a list.
                ind = 0
                iret = {}
                for single in return_data:
                    if isinstance(single, dict) and isinstance(iret, dict):
                        iret.update(single)
                    else:
                        if not iret:
                            iret = []
                        iret.append(single)
                    tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job')
                    event_data = {'return': single}
                    minion_instance._fire_master(event_data, tag)
                    ind += 1
                ret['return'] = iret
            else:
                ret['return'] = return_data

            retcode = minion_instance.functions.pack['__context__'].get(
                'retcode',
                salt.defaults.exitcodes.EX_OK
            )
            if retcode == salt.defaults.exitcodes.EX_OK:
                # No nonzero retcode in __context__ dunder. Check if return
                # is a dictionary with a "result" or "success" key.
                try:
                    func_result = all(return_data.get(x, True)
                                      for x in ('result', 'success'))
                except Exception:
                    # return data is not a dict
                    func_result = True
                if not func_result:
                    retcode = salt.defaults.exitcodes.EX_GENERIC

            ret['retcode'] = retcode
            ret['success'] = retcode == salt.defaults.exitcodes.EX_OK
        except CommandNotFoundError as exc:
            msg = 'Command required for \'{0}\' not found'.format(
                function_name
            )
            log.debug(msg, exc_info=True)
            ret['return'] = '{0}: {1}'.format(msg, exc)
            ret['out'] = 'nested'
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        except CommandExecutionError as exc:
            log.error(
                'A command in \'%s\' had a problem: %s',
                function_name, exc,
                exc_info_on_loglevel=logging.DEBUG
            )
            ret['return'] = 'ERROR: {0}'.format(exc)
            ret['out'] = 'nested'
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        except SaltInvocationError as exc:
            log.error(
                'Problem executing \'%s\': %s',
                function_name, exc,
                exc_info_on_loglevel=logging.DEBUG
            )
            ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
                function_name, exc
            )
            ret['out'] = 'nested'
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        except TypeError as exc:
            # Most commonly bad args from the CLI; include the function's
            # docstring to help the caller fix the invocation.
            msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(
                function_name, exc, func.__doc__ or ''
            )
            log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
            ret['return'] = msg
            ret['out'] = 'nested'
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        except Exception:
            msg = 'The minion function caused an exception'
            log.warning(msg, exc_info_on_loglevel=True)
            salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
            ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
            ret['out'] = 'nested'
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
    else:
        # Unknown function: return any partially-matching documentation,
        # or a "function not available" message with possible reasons.
        docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name))
        if docs:
            docs[function_name] = minion_instance.functions.missing_fun_string(function_name)
            ret['return'] = docs
        else:
            ret['return'] = minion_instance.functions.missing_fun_string(function_name)
            mod_name = function_name.split('.')[0]
            if mod_name in minion_instance.function_errors:
                ret['return'] += ' Possible reasons: \'{0}\''.format(
                    minion_instance.function_errors[mod_name]
                )
        ret['success'] = False
        ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        ret['out'] = 'nested'

    ret['jid'] = data['jid']
    ret['fun'] = data['fun']
    ret['fun_args'] = data['arg']
    if 'master_id' in data:
        ret['master_id'] = data['master_id']
    if 'metadata' in data:
        if isinstance(data['metadata'], dict):
            ret['metadata'] = data['metadata']
        else:
            log.warning('The metadata parameter must be a dictionary. Ignoring.')
    if minion_instance.connected:
        minion_instance._return_pub(
            ret,
            timeout=minion_instance._return_retry_timer()
        )

    # Add default returners from minion config
    # Should have been converted to comma-delimited string already
    if isinstance(opts.get('return'), six.string_types):
        if data['ret']:
            data['ret'] = ','.join((data['ret'], opts['return']))
        else:
            data['ret'] = opts['return']

    log.debug('minion return: %s', ret)
    # TODO: make a list? Seems odd to split it this late :/
    if data['ret'] and isinstance(data['ret'], six.string_types):
        if 'ret_config' in data:
            ret['ret_config'] = data['ret_config']
        if 'ret_kwargs' in data:
            ret['ret_kwargs'] = data['ret_kwargs']
        ret['id'] = opts['id']
        for returner in set(data['ret'].split(',')):
            try:
                returner_str = '{0}.returner'.format(returner)
                if returner_str in minion_instance.returners:
                    minion_instance.returners[returner_str](ret)
                else:
                    returner_err = minion_instance.returners.missing_fun_string(returner_str)
                    log.error(
                        'Returner %s could not be loaded: %s',
                        returner_str, returner_err
                    )
            except Exception as exc:
                log.exception(
                    'The return failed for job %s: %s', data['jid'], exc
                )
This method should be used as a threading target; it starts the actual minion-side execution of a job.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L1631-L1851
[ "def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n", "def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):\n '''\n Detect the args and kwargs that need to be passed to a function call, and\n check them against what was passed.\n '''\n argspec = salt.utils.args.get_function_argspec(func)\n _args = []\n _kwargs = {}\n invalid_kwargs = []\n\n for arg in args:\n if isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:\n # if the arg is a dict with __kwarg__ == True, then its a kwarg\n for key, val in six.iteritems(arg):\n if argspec.keywords or key in argspec.args:\n # Function supports **kwargs or is a positional argument to\n # the function.\n _kwargs[key] = val\n else:\n # **kwargs not in argspec and parsed argument name not in\n # list of positional arguments. This keyword argument is\n # invalid.\n invalid_kwargs.append('{0}={1}'.format(key, val))\n continue\n\n else:\n string_kwarg = salt.utils.args.parse_input([arg], condition=False)[1] # pylint: disable=W0632\n if string_kwarg:\n if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args:\n # Function supports **kwargs or is a positional argument to\n # the function.\n _kwargs.update(string_kwarg)\n else:\n # **kwargs not in argspec and parsed argument name not in\n # list of positional arguments. 
This keyword argument is\n # invalid.\n for key, val in six.iteritems(string_kwarg):\n invalid_kwargs.append('{0}={1}'.format(key, val))\n else:\n _args.append(arg)\n\n if invalid_kwargs and not ignore_invalid:\n salt.utils.args.invalid_kwargs(invalid_kwargs)\n\n if argspec.keywords and isinstance(data, dict):\n # this function accepts **kwargs, pack in the publish data\n for key, val in six.iteritems(data):\n _kwargs['__pub_{0}'.format(key)] = val\n\n return _args, _kwargs\n", "def appendproctitle(name):\n '''\n Append \"name\" to the current process title\n '''\n if HAS_SETPROCTITLE:\n setproctitle.setproctitle(setproctitle.getproctitle() + ' ' + name)\n", "def fire_exception(exc, opts, job=None, node='minion'):\n '''\n Fire raw exception across the event bus\n '''\n if job is None:\n job = {}\n event = salt.utils.event.SaltEvent(node, opts=opts, listen=False)\n event.fire_event(pack_exception(exc), '_salt_error')\n" ]
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, 
self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' 
mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! 
if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) executors = salt.loader.executors(opts, functions, proxy=proxy) if opt_in: self.opts = opts return functions, returners, errors, executors def _send_req_sync(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.ReqChannel.factory(self.opts) try: return channel.send(load, timeout=timeout) finally: channel.close() @tornado.gen.coroutine def _send_req_async(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.AsyncReqChannel.factory(self.opts) try: ret = yield channel.send(load, timeout=timeout) raise tornado.gen.Return(ret) finally: channel.close() def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. ''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return if sync: try: self._send_req_sync(load, timeout) except salt.exceptions.SaltReqTimeoutError: log.info('fire_master failed: master could not be contacted. 
Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return False except Exception: log.info('fire_master failed: %s', traceback.format_exc()) return False else: if timeout_handler is None: def handle_timeout(*_): log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return True timeout_handler = handle_timeout with tornado.stack_context.ExceptionStackContext(timeout_handler): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # Ensure payload is unicode. Disregard failure to decode binary blobs. if six.PY2: data = salt.utils.data.decode(data, keep=True) if 'user' in data: log.info( 'User %s Executing command %s with jid %s', data['user'], data['fun'], data['jid'] ) else: log.info( 'Executing command %s with jid %s', data['fun'], data['jid'] ) log.debug('Command details %s', data) # Don't duplicate jobs log.trace('Started JIDs: %s', self.jid_queue) if self.jid_queue is not None: if data['jid'] in self.jid_queue: return else: self.jid_queue.append(data['jid']) if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']: self.jid_queue.pop(0) if isinstance(data['fun'], six.string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners process_count_max = self.opts.get('process_count_max') process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs') if process_count_max > 0: process_count = len(salt.utils.minion.running(self.opts)) while 
process_count >= process_count_max: log.warning('Maximum number of processes (%s) reached while ' 'executing jid %s, waiting %s seconds...', process_count_max, data['jid'], process_count_max_sleep_secs) yield tornado.gen.sleep(process_count_max_sleep_secs) process_count = len(salt.utils.minion.running(self.opts)) # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. instance = self multiprocessing_enabled = self.opts.get('multiprocessing', True) if multiprocessing_enabled: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None with default_signals(signal.SIGINT, signal.SIGTERM): process = SignalHandlingMultiprocessingProcess( target=self._target, args=(instance, self.opts, data, self.connected) ) else: process = threading.Thread( target=self._target, args=(instance, self.opts, data, self.connected), name=data['jid'] ) if multiprocessing_enabled: with default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers process.start() else: process.start() # TODO: remove the windows specific check? 
if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be 
used as a threading target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID %s', sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) multifunc_ordered = opts.get('multifunc_ordered', False) num_funcs = len(data['fun']) if multifunc_ordered: ret = { 'return': [None] * num_funcs, 'retcode': [None] * num_funcs, 'success': [False] * num_funcs } else: ret = { 'return': {}, 'retcode': {}, 'success': {} } for ind in range(0, num_funcs): if not multifunc_ordered: ret['success'][data['fun'][ind]] = False try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True elif minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. 
Only ' 'saltutil.refresh_pillar allowed in blackout mode.') func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) minion_instance.functions.pack['__context__']['retcode'] = 0 key = ind if multifunc_ordered else data['fun'][ind] ret['return'][key] = func(*args, **kwargs) retcode = minion_instance.functions.pack['__context__'].get( 'retcode', 0 ) if retcode == 0: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. try: func_result = all(ret['return'][key].get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = 1 ret['retcode'][key] = retcode ret['success'][key] = retcode == 0 except Exception as exc: trb = traceback.format_exc() log.warning('The minion function caused an exception: %s', exc) if multifunc_ordered: ret['return'][ind] = trb else: ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job %s: %s', data['jid'], exc ) def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file 
is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled if ret['jid'] == 'req': ret['jid'] = salt.utils.jid.gen_jid(self.opts) salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) if not self.opts['pub_ret']: return '' def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. 
If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' if not isinstance(rets, list): rets = [rets] jids = {} for ret in rets: jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) load = jids.setdefault(jid, {}) if ret_cmd == '_syndic_return': if not load: load.update({'id': self.opts['id'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__'), 'return': {}}) if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load.update({'id': self.opts['id']}) for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. 
This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled salt.utils.minion.cache_jobs(self.opts, load['jid'], ret) load = {'cmd': ret_cmd, 'load': list(six.itervalues(jids))} def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' 
) else: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data) def _refresh_grains_watcher(self, refresh_interval_in_minutes): ''' Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: :return: None ''' if '__update_grains' not in self.opts.get('schedule', {}): if 'schedule' not in self.opts: self.opts['schedule'] = {} self.opts['schedule'].update({ '__update_grains': { 'function': 'event.fire', 'args': [{}, 'grains_refresh'], 'minutes': refresh_interval_in_minutes } }) def _fire_master_minion_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to False in Sodium release. self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'minion_start' ) # send name spaced event self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'minion'), ) def module_refresh(self, force_refresh=False, notify=False): ''' Refresh the functions and returners. ''' log.debug('Refreshing modules. Notify=%s', notify) self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify) self.schedule.functions = self.functions self.schedule.returners = self.returners def beacons_refresh(self): ''' Refresh the functions and returners. 
''' log.debug('Refreshing beacons.') self.beacons = salt.beacons.Beacon(self.opts, self.functions) def matchers_refresh(self): ''' Refresh the matchers ''' log.debug('Refreshing matchers.') self.matchers = salt.loader.matchers(self.opts) # TODO: only allow one future in flight at a time? @tornado.gen.coroutine def pillar_refresh(self, force_refresh=False, notify=False): ''' Refresh the pillar ''' if self.connected: log.debug('Refreshing pillar. Notify: %s', notify) async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ) try: self.opts['pillar'] = yield async_pillar.compile_pillar() if notify: evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE) except SaltClientError: # Do not exit if a pillar refresh fails. log.error('Pillar data could not be refreshed. ' 'One or more masters may be down!') finally: async_pillar.destroy() self.module_refresh(force_refresh, notify) self.matchers_refresh() self.beacons_refresh() def manage_schedule(self, tag, data): ''' Refresh the functions and returners. 
''' func = data.get('func', None) name = data.get('name', None) schedule = data.get('schedule', None) where = data.get('where', None) persist = data.get('persist', None) funcs = {'delete': ('delete_job', (name, persist)), 'add': ('add_job', (schedule, persist)), 'modify': ('modify_job', (name, schedule, persist)), 'enable': ('enable_schedule', ()), 'disable': ('disable_schedule', ()), 'enable_job': ('enable_job', (name, persist)), 'disable_job': ('disable_job', (name, persist)), 'postpone_job': ('postpone_job', (name, data)), 'skip_job': ('skip_job', (name, data)), 'reload': ('reload', (schedule,)), 'list': ('list', (where,)), 'save_schedule': ('save_schedule', ()), 'get_next_fire_time': ('get_next_fire_time', (name,))} # Call the appropriate schedule function try: alias, params = funcs.get(func) getattr(self.schedule, alias)(*params) except TypeError: log.error('Function "%s" is unavailable in salt.utils.scheduler', func) def manage_beacons(self, tag, data): ''' Manage Beacons ''' func = data.get('func', None) name = data.get('name', None) beacon_data = data.get('beacon_data', None) include_pillar = data.get('include_pillar', None) include_opts = data.get('include_opts', None) funcs = {'add': ('add_beacon', (name, beacon_data)), 'modify': ('modify_beacon', (name, beacon_data)), 'delete': ('delete_beacon', (name,)), 'enable': ('enable_beacons', ()), 'disable': ('disable_beacons', ()), 'enable_beacon': ('enable_beacon', (name,)), 'disable_beacon': ('disable_beacon', (name,)), 'list': ('list_beacons', (include_opts, include_pillar)), 'list_available': ('list_available_beacons', ()), 'validate_beacon': ('validate_beacon', (name, beacon_data)), 'reset': ('reset', ())} # Call the appropriate beacon function try: alias, params = funcs.get(func) getattr(self.beacons, alias)(*params) except AttributeError: log.error('Function "%s" is unavailable in salt.beacons', func) except TypeError as exc: log.info( 'Failed to handle %s with data(%s). 
Error: %s', tag, data, exc, exc_info_on_loglevel=logging.DEBUG ) def environ_setenv(self, tag, data): ''' Set the salt-minion main process environment according to the data contained in the minion event data ''' environ = data.get('environ', None) if environ is None: return False false_unsets = data.get('false_unsets', False) clear_all = data.get('clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all) def _pre_tune(self): ''' Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running ''' if self._running is None: self._running = True elif self._running is False: log.error( 'This %s was scheduled to stop. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return elif self._running is True: log.error( 'This %s is already running. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return try: log.info( '%s is starting as user \'%s\'', self.__class__.__name__, salt.utils.user.get_user() ) except Exception as err: # Only windows is allowed to fail here. See #3189. Log as debug in # that case. Else, error. 
log.log( salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR, 'Failed to get the user who is starting %s', self.__class__.__name__, exc_info=err ) def _mine_send(self, tag, data): ''' Send mine data to the master ''' channel = salt.transport.client.ReqChannel.factory(self.opts) data['tok'] = self.tok try: ret = channel.send(data) return ret except SaltReqTimeoutError: log.warning('Unable to send mine data to master.') return None finally: channel.close() def _handle_tag_module_refresh(self, tag, data): ''' Handle a module_refresh event ''' self.module_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) @tornado.gen.coroutine def _handle_tag_pillar_refresh(self, tag, data): ''' Handle a pillar_refresh event ''' yield self.pillar_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) def _handle_tag_beacons_refresh(self, tag, data): ''' Handle a beacon_refresh event ''' self.beacons_refresh() def _handle_tag_matchers_refresh(self, tag, data): ''' Handle a matchers_refresh event ''' self.matchers_refresh() def _handle_tag_manage_schedule(self, tag, data): ''' Handle a manage_schedule event ''' self.manage_schedule(tag, data) def _handle_tag_manage_beacons(self, tag, data): ''' Handle a manage_beacons event ''' self.manage_beacons(tag, data) def _handle_tag_grains_refresh(self, tag, data): ''' Handle a grains_refresh event ''' if (data.get('force_refresh', False) or self.grains_cache != self.opts['grains']): self.pillar_refresh(force_refresh=True) self.grains_cache = self.opts['grains'] def _handle_tag_environ_setenv(self, tag, data): ''' Handle a environ_setenv event ''' self.environ_setenv(tag, data) def _handle_tag_minion_mine(self, tag, data): ''' Handle a _minion_mine event ''' self._mine_send(tag, data) def _handle_tag_fire_master(self, tag, data): ''' Handle a fire_master event ''' if self.connected: log.debug('Forwarding master event tag=%s', data['tag']) 
self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) def _handle_tag_master_disconnected_failback(self, tag, data): ''' Handle a master_disconnected_failback event ''' # if the master disconnect event is for a different master, raise an exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 
'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = 
True self.io_loop.stop() def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) def _handle_tag_salt_auth_creds(self, tag, data): ''' Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def handle_event(self, package): 
''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. ''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion._thread_multi_return
python
def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID %s', sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) multifunc_ordered = opts.get('multifunc_ordered', False) num_funcs = len(data['fun']) if multifunc_ordered: ret = { 'return': [None] * num_funcs, 'retcode': [None] * num_funcs, 'success': [False] * num_funcs } else: ret = { 'return': {}, 'retcode': {}, 'success': {} } for ind in range(0, num_funcs): if not multifunc_ordered: ret['success'][data['fun'][ind]] = False try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True elif minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. 
Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) minion_instance.functions.pack['__context__']['retcode'] = 0 key = ind if multifunc_ordered else data['fun'][ind] ret['return'][key] = func(*args, **kwargs) retcode = minion_instance.functions.pack['__context__'].get( 'retcode', 0 ) if retcode == 0: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. try: func_result = all(ret['return'][key].get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = 1 ret['retcode'][key] = retcode ret['success'][key] = retcode == 0 except Exception as exc: trb = traceback.format_exc() log.warning('The minion function caused an exception: %s', exc) if multifunc_ordered: ret['return'][ind] = trb else: ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job %s: %s', data['jid'], exc )
This method should be used as a threading target, start the actual minion side execution.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L1854-L1971
[ "def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n", "def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):\n '''\n Detect the args and kwargs that need to be passed to a function call, and\n check them against what was passed.\n '''\n argspec = salt.utils.args.get_function_argspec(func)\n _args = []\n _kwargs = {}\n invalid_kwargs = []\n\n for arg in args:\n if isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:\n # if the arg is a dict with __kwarg__ == True, then its a kwarg\n for key, val in six.iteritems(arg):\n if argspec.keywords or key in argspec.args:\n # Function supports **kwargs or is a positional argument to\n # the function.\n _kwargs[key] = val\n else:\n # **kwargs not in argspec and parsed argument name not in\n # list of positional arguments. This keyword argument is\n # invalid.\n invalid_kwargs.append('{0}={1}'.format(key, val))\n continue\n\n else:\n string_kwarg = salt.utils.args.parse_input([arg], condition=False)[1] # pylint: disable=W0632\n if string_kwarg:\n if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args:\n # Function supports **kwargs or is a positional argument to\n # the function.\n _kwargs.update(string_kwarg)\n else:\n # **kwargs not in argspec and parsed argument name not in\n # list of positional arguments. 
This keyword argument is\n # invalid.\n for key, val in six.iteritems(string_kwarg):\n invalid_kwargs.append('{0}={1}'.format(key, val))\n else:\n _args.append(arg)\n\n if invalid_kwargs and not ignore_invalid:\n salt.utils.args.invalid_kwargs(invalid_kwargs)\n\n if argspec.keywords and isinstance(data, dict):\n # this function accepts **kwargs, pack in the publish data\n for key, val in six.iteritems(data):\n _kwargs['__pub_{0}'.format(key)] = val\n\n return _args, _kwargs\n", "def appendproctitle(name):\n '''\n Append \"name\" to the current process title\n '''\n if HAS_SETPROCTITLE:\n setproctitle.setproctitle(setproctitle.getproctitle() + ' ' + name)\n" ]
class Minion(MinionBase):
    '''
    This class instantiates a minion, runs connections for a minion,
    and loads all of the functions into the minion
    '''
    def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None):  # pylint: disable=W0231
        '''
        Pass in the options dict

        :param opts: the minion configuration dict
        :param timeout: seconds to wait while evaluating/authenticating to a master
        :param safe: passed through to eval_master; governs retry-vs-raise on auth failure
        :param loaded_base_name: alternate base name for the loader namespaces
        :param io_loop: optional externally-managed IOLoop; a ZMQ loop is
            installed and used if not provided
        :param jid_queue: optional shared list of recently-seen JIDs used for
            de-duplicating jobs
        '''
        # this means that the parent class doesn't know *which* master we connect to
        super(Minion, self).__init__(opts)
        self.timeout = timeout
        self.safe = safe

        self._running = None
        self.win_proc = []
        self.loaded_base_name = loaded_base_name
        self.connected = False
        self.restart = False
        # Flag meaning minion has finished initialization including first connect to the master.
        # True means the Minion is fully functional and ready to handle events.
        self.ready = False
        self.jid_queue = [] if jid_queue is None else jid_queue
        self.periodic_callbacks = {}

        if io_loop is None:
            install_zmq()
            self.io_loop = ZMQDefaultLoop.current()
        else:
            self.io_loop = io_loop

        # Warn if ZMQ < 3.2
        if zmq:
            if ZMQ_VERSION_INFO < (3, 2):
                log.warning(
                    'You have a version of ZMQ less than ZMQ 3.2! There are '
                    'known connection keep-alive issues with ZMQ < 3.2 which '
                    'may result in loss of contact with minions. Please '
                    'upgrade your ZMQ!'
                )
        # Late setup of the opts grains, so we can log from the grains
        # module.  If this is a proxy, however, we need to init the proxymodule
        # before we can get the grains.  We do this for proxies in the
        # post_master_init
        if not salt.utils.platform.is_proxy():
            self.opts['grains'] = salt.loader.grains(opts)
        else:
            # Proxy minions cannot run beacons/scheduler before the proxymodule
            # (and therefore the master connection) is up; force these off.
            if self.opts.get('beacons_before_connect', False):
                log.warning(
                    '\'beacons_before_connect\' is not supported '
                    'for proxy minions. Setting to False'
                )
                self.opts['beacons_before_connect'] = False
            if self.opts.get('scheduler_before_connect', False):
                log.warning(
                    '\'scheduler_before_connect\' is not supported '
                    'for proxy minions. Setting to False'
                )
                self.opts['scheduler_before_connect'] = False

        log.info('Creating minion process manager')

        # Optional random delay to avoid thundering-herd reconnects of many
        # minions against one master.
        if self.opts['random_startup_delay']:
            sleep_time = random.randint(0, self.opts['random_startup_delay'])
            log.info(
                'Minion sleeping for %s seconds due to configured '
                'startup_delay between 0 and %s seconds',
                sleep_time, self.opts['random_startup_delay']
            )
            time.sleep(sleep_time)

        self.process_manager = ProcessManager(name='MinionProcessManager')
        self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True})
        # We don't have the proxy setup yet, so we can't start engines
        # Engines need to be able to access __proxy__
        if not salt.utils.platform.is_proxy():
            self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
                                        self.process_manager)

        # Install the SIGINT/SIGTERM handlers if not done so far
        if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
            # No custom signal handling was added, install our own
            signal.signal(signal.SIGINT, self._handle_signals)

        if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
            # No custom signal handling was added, install our own
            signal.signal(signal.SIGTERM, self._handle_signals)

    def _handle_signals(self, signum, sigframe):  # pylint: disable=unused-argument
        # Stop the event loop flag, tear down all child processes, then exit.
        self._running = False
        # escalate the signals to the process manager
        self.process_manager.stop_restarting()
        self.process_manager.send_signal_to_processes(signum)
        # kill any remaining processes
        self.process_manager.kill_children()
        time.sleep(1)
        sys.exit(0)

    def sync_connect_master(self, timeout=None, failed=False):
        '''
        Block until we are connected to a master

        Runs the io_loop until the connect_master future completes (or the
        optional ``timeout`` elapses), then re-raises any connection error.
        '''
        self._sync_connect_master_success = False
        log.debug("sync_connect_master")

        def on_connect_master_future_done(future):
            self._sync_connect_master_success = True
            self.io_loop.stop()

        self._connect_master_future = self.connect_master(failed=failed)
        # finish connecting to master
        self._connect_master_future.add_done_callback(on_connect_master_future_done)
        if timeout:
            self.io_loop.call_later(timeout, self.io_loop.stop)
        try:
            self.io_loop.start()
        except KeyboardInterrupt:
            self.destroy()
        # I made the following 3 line oddity to preserve traceback.
        # Please read PR #23978 before changing, hopefully avoiding regressions.
        # Good luck, we're all counting on you.  Thanks.
        if self._connect_master_future.done():
            future_exception = self._connect_master_future.exception()
            if future_exception:
                # This needs to be re-raised to preserve restart_on_error behavior.
                raise six.reraise(*future_exception)
        if timeout and self._sync_connect_master_success is False:
            raise SaltDaemonNotRunning('Failed to connect to the salt-master')
    @tornado.gen.coroutine
    def connect_master(self, failed=False):
        '''
        Return a future which will complete when you are connected to a master
        '''
        master, self.pub_channel = yield self.eval_master(self.opts,
                                                          self.timeout,
                                                          self.safe,
                                                          failed)
        yield self._post_master_init(master)

    # TODO: better name...
    @tornado.gen.coroutine
    def _post_master_init(self, master):
        '''
        Function to finish init after connecting to a master

        This is primarily loading modules, pillars, etc. (since they need
        to know which master they connected to)

        If this function is changed, please check ProxyMinion._post_master_init
        to see if those changes need to be propagated.

        Minions and ProxyMinions need significantly different post master setups,
        which is why the differences are not factored out into separate helper
        functions.
        '''
        if self.connected:
            self.opts['master'] = master

            # Initialize pillar before loader to make pillar accessible in modules
            async_pillar = salt.pillar.get_async_pillar(
                self.opts,
                self.opts['grains'],
                self.opts['id'],
                self.opts['saltenv'],
                pillarenv=self.opts.get('pillarenv')
            )
            self.opts['pillar'] = yield async_pillar.compile_pillar()
            async_pillar.destroy()

        if not self.ready:
            self._setup_core()
        elif self.connected and self.opts['pillar']:
            # The pillar has changed due to the connection to the master.
            # Reload the functions so that they can use the new pillar data.
            self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
            if hasattr(self, 'schedule'):
                self.schedule.functions = self.functions
                self.schedule.returners = self.returners

        if not hasattr(self, 'schedule'):
            self.schedule = salt.utils.schedule.Schedule(
                self.opts,
                self.functions,
                self.returners,
                cleanup=[master_event(type='alive')])

        # add default scheduling jobs to the minions scheduler
        if self.opts['mine_enabled'] and 'mine.update' in self.functions:
            self.schedule.add_job({
                '__mine_interval': {
                    'function': 'mine.update',
                    'minutes': self.opts['mine_interval'],
                    'jid_include': True,
                    'maxrunning': 2,
                    'run_on_start': True,
                    'return_job': self.opts.get('mine_return_job', False)
                }
            }, persist=True)
            log.info('Added mine.update to scheduler')
        else:
            self.schedule.delete_job('__mine_interval', persist=True)

        # add master_alive job if enabled
        # (TCP transport has its own keep-alive, so the alive job is ZMQ-only)
        if (self.opts['transport'] != 'tcp' and
                self.opts['master_alive_interval'] > 0 and
                self.connected):
            self.schedule.add_job({
                master_event(type='alive', master=self.opts['master']): {
                    'function': 'status.master',
                    'seconds': self.opts['master_alive_interval'],
                    'jid_include': True,
                    'maxrunning': 1,
                    'return_job': False,
                    'kwargs': {'master': self.opts['master'],
                               'connected': True}
                }
            }, persist=True)
            # When failed over away from the preferred (first) master, also
            # schedule periodic pings of that master so we can fail back.
            if self.opts['master_failback'] and \
                    'master_list' in self.opts and \
                    self.opts['master'] != self.opts['master_list'][0]:
                self.schedule.add_job({
                    master_event(type='failback'): {
                        'function': 'status.ping_master',
                        'seconds': self.opts['master_failback_interval'],
                        'jid_include': True,
                        'maxrunning': 1,
                        'return_job': False,
                        'kwargs': {'master': self.opts['master_list'][0]}
                    }
                }, persist=True)
            else:
                self.schedule.delete_job(master_event(type='failback'), persist=True)
        else:
            self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
            self.schedule.delete_job(master_event(type='failback'), persist=True)
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' 
    def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None):
        '''
        Return the functions and the returners loaded up from the loader
        module

        :param force_refresh: passed through to the grains loader
        :param notify: passed through to the minion_mods loader
        :param grains: if not None, skip re-computing grains here
        :param opts: alternate opts dict; when given, it also replaces
            ``self.opts`` at the end (see ``opt_in`` below)
        :return: (functions, returners, errors, executors) tuple
        '''
        opt_in = True
        if not opts:
            opts = self.opts
            opt_in = False
        # if this is a *nix system AND modules_max_memory is set, lets enforce
        # a memory limit on module imports
        # this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
        modules_max_memory = False
        if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
            log.debug(
                'modules_max_memory set, enforcing a maximum of %s',
                opts['modules_max_memory']
            )
            modules_max_memory = True
            old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
            rss, vms = psutil.Process(os.getpid()).memory_info()[:2]
            # budget = current footprint plus the configured headroom
            mem_limit = rss + vms + opts['modules_max_memory']
            resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
        elif opts.get('modules_max_memory', -1) > 0:
            if not HAS_PSUTIL:
                log.error('Unable to enforce modules_max_memory because psutil is missing')
            if not HAS_RESOURCE:
                log.error('Unable to enforce modules_max_memory because resource is missing')

        # This might be a proxy minion
        if hasattr(self, 'proxy'):
            proxy = self.proxy
        else:
            proxy = None

        if grains is None:
            opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy)
        self.utils = salt.loader.utils(opts, proxy=proxy)

        if opts.get('multimaster', False):
            # deep-copy opts so each master's loader namespace is isolated
            s_opts = copy.deepcopy(opts)
            functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy,
                                                loaded_base_name=self.loaded_base_name, notify=notify)
        else:
            functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy)
        returners = salt.loader.returners(opts, functions, proxy=proxy)
        errors = {}
        if '_errors' in functions:
            errors = functions['_errors']
            functions.pop('_errors')

        # we're done, reset the limits!
        if modules_max_memory is True:
            resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)

        executors = salt.loader.executors(opts, functions, proxy=proxy)

        if opt_in:
            self.opts = opts

        return functions, returners, errors, executors
    def _send_req_sync(self, load, timeout):
        # Synchronously publish ``load`` to the master over a request channel,
        # optionally signing it first when minion_sign_messages is set.
        if self.opts['minion_sign_messages']:
            log.trace('Signing event to be published onto the bus.')
            minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
            sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
            load['sig'] = sig

        channel = salt.transport.client.ReqChannel.factory(self.opts)
        try:
            return channel.send(load, timeout=timeout)
        finally:
            # always release the channel, even on send failure
            channel.close()

    @tornado.gen.coroutine
    def _send_req_async(self, load, timeout):
        # Asynchronous twin of _send_req_sync; yields the master's reply.
        if self.opts['minion_sign_messages']:
            log.trace('Signing event to be published onto the bus.')
            minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
            sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
            load['sig'] = sig

        channel = salt.transport.client.AsyncReqChannel.factory(self.opts)
        try:
            ret = yield channel.send(load, timeout=timeout)
            raise tornado.gen.Return(ret)
        finally:
            channel.close()

    def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None):
        '''
        Fire an event on the master, or drop message if unable to send.

        Exactly one of ``events`` (a list) or ``tag`` (with optional
        ``data``) must be supplied; otherwise the call is a no-op.
        Returns False on a failed synchronous send, True otherwise
        (async sends report success optimistically).
        '''
        load = {'id': self.opts['id'],
                'cmd': '_minion_event',
                'pretag': pretag,
                'tok': self.tok}
        if events:
            load['events'] = events
        elif data and tag:
            load['data'] = data
            load['tag'] = tag
        elif not data and tag:
            load['data'] = {}
            load['tag'] = tag
        else:
            # nothing to send
            return

        if sync:
            try:
                self._send_req_sync(load, timeout)
            except salt.exceptions.SaltReqTimeoutError:
                log.info('fire_master failed: master could not be contacted. Request timed out.')
                # very likely one of the masters is dead, status.master will flush it
                self.functions['status.master'](self.opts['master'])
                return False
            except Exception:
                log.info('fire_master failed: %s', traceback.format_exc())
                return False
        else:
            if timeout_handler is None:
                def handle_timeout(*_):
                    log.info('fire_master failed: master could not be contacted. Request timed out.')
                    # very likely one of the masters is dead, status.master will flush it
                    self.functions['status.master'](self.opts['master'])
                    return True
                timeout_handler = handle_timeout

            with tornado.stack_context.ExceptionStackContext(timeout_handler):
                self._send_req_async(load, timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg
        return True
    @tornado.gen.coroutine
    def _handle_decoded_payload(self, data):
        '''
        Override this method if you wish to handle the decoded data
        differently.

        Dispatches a decoded job payload: de-duplicates by jid, optionally
        reloads modules, throttles on process_count_max, then runs the job
        in a new process (or thread) via ``_target``.
        '''
        # Ensure payload is unicode. Disregard failure to decode binary blobs.
        if six.PY2:
            data = salt.utils.data.decode(data, keep=True)
        if 'user' in data:
            log.info(
                'User %s Executing command %s with jid %s',
                data['user'], data['fun'], data['jid']
            )
        else:
            log.info(
                'Executing command %s with jid %s',
                data['fun'], data['jid']
            )
        log.debug('Command details %s', data)

        # Don't duplicate jobs
        log.trace('Started JIDs: %s', self.jid_queue)
        if self.jid_queue is not None:
            if data['jid'] in self.jid_queue:
                return
            else:
                self.jid_queue.append(data['jid'])
                # bounded queue: drop the oldest jid past the high-water mark
                if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']:
                    self.jid_queue.pop(0)

        if isinstance(data['fun'], six.string_types):
            if data['fun'] == 'sys.reload_modules':
                self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
                self.schedule.functions = self.functions
                self.schedule.returners = self.returners

        process_count_max = self.opts.get('process_count_max')
        process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs')
        if process_count_max > 0:
            # back off while too many job processes are already running
            process_count = len(salt.utils.minion.running(self.opts))
            while process_count >= process_count_max:
                log.warning('Maximum number of processes (%s) reached while '
                            'executing jid %s, waiting %s seconds...',
                            process_count_max, data['jid'], process_count_max_sleep_secs)
                yield tornado.gen.sleep(process_count_max_sleep_secs)
                process_count = len(salt.utils.minion.running(self.opts))

        # We stash an instance references to allow for the socket
        # communication in Windows. You can't pickle functions, and thus
        # python needs to be able to reconstruct the reference on the other
        # side.
        instance = self
        multiprocessing_enabled = self.opts.get('multiprocessing', True)
        if multiprocessing_enabled:
            if sys.platform.startswith('win'):
                # let python reconstruct the minion on the other side if we're
                # running on windows
                instance = None
            with default_signals(signal.SIGINT, signal.SIGTERM):
                process = SignalHandlingMultiprocessingProcess(
                    target=self._target, args=(instance, self.opts, data, self.connected)
                )
        else:
            process = threading.Thread(
                target=self._target,
                args=(instance, self.opts, data, self.connected),
                name=data['jid']
            )

        if multiprocessing_enabled:
            with default_signals(signal.SIGINT, signal.SIGTERM):
                # Reset current signals before starting the process in
                # order not to inherit the current signal handlers
                process.start()
        else:
            process.start()

        # TODO: remove the windows specific check?
        if multiprocessing_enabled and not salt.utils.platform.is_windows():
            # we only want to join() immediately if we are daemonizing a process
            process.join()
        elif salt.utils.platform.is_windows():
            # keep a handle so Windows child processes can be reaped later
            self.win_proc.append(process)
if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading 
target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. 
Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
try: func_result = all(return_data.get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret['retcode'] = retcode ret['success'] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = 'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( 'A command in \'%s\' had a problem: %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( 'Problem executing \'%s\': %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing \'{0}\': {1}'.format( function_name, exc ) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format( function_name, exc, func.__doc__ or '' ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string(function_name) ret['return'] = docs else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = 
function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: \'{0}\''.format( minion_instance.function_errors[mod_name] ) ret['success'] = False ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. Ignoring.') if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) # Add default returners from minion config # Should have been coverted to comma-delimited string already if isinstance(opts.get('return'), six.string_types): if data['ret']: data['ret'] = ','.join((data['ret'], opts['return'])) else: data['ret'] = opts['return'] log.debug('minion return: %s', ret) # TODO: make a list? 
Seems odd to split it this late :/ if data['ret'] and isinstance(data['ret'], six.string_types): if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: returner_str = '{0}.returner'.format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string(returner_str) log.error( 'Returner %s could not be loaded: %s', returner_str, returner_err ) except Exception as exc: log.exception( 'The return failed for job %s: %s', data['jid'], exc ) @classmethod def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. 
    def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True):
        '''
        Return the data from the executed command to the master server

        Batched variant of ``_return_pub``: accepts a single return dict or a
        list of them, aggregates one load per jid, and sends them all in a
        single request.
        '''
        if not isinstance(rets, list):
            rets = [rets]
        jids = {}
        for ret in rets:
            jid = ret.get('jid', ret.get('__jid__'))
            fun = ret.get('fun', ret.get('__fun__'))
            if self.opts['multiprocessing']:
                # job is done; remove its proc_dir marker file
                fn_ = os.path.join(self.proc_dir, jid)
                if os.path.isfile(fn_):
                    try:
                        os.remove(fn_)
                    except (OSError, IOError):
                        # The file is gone already
                        pass
            log.info('Returning information for job: %s', jid)
            load = jids.setdefault(jid, {})
            if ret_cmd == '_syndic_return':
                if not load:
                    load.update({'id': self.opts['id'],
                                 'jid': jid,
                                 'fun': fun,
                                 'arg': ret.get('arg'),
                                 'tgt': ret.get('tgt'),
                                 'tgt_type': ret.get('tgt_type'),
                                 'load': ret.get('__load__'),
                                 'return': {}})
                if '__master_id__' in ret:
                    load['master_id'] = ret['__master_id__']
                for key, value in six.iteritems(ret):
                    if key.startswith('__'):
                        continue
                    load['return'][key] = value
            else:
                load.update({'id': self.opts['id']})
                for key, value in six.iteritems(ret):
                    load[key] = value
            if 'out' in ret:
                if isinstance(ret['out'], six.string_types):
                    load['out'] = ret['out']
                else:
                    log.error(
                        'Invalid outputter %s. This is likely a bug.',
                        ret['out']
                    )
            else:
                try:
                    oput = self.functions[fun].__outputter__
                except (KeyError, AttributeError, TypeError):
                    pass
                else:
                    if isinstance(oput, six.string_types):
                        load['out'] = oput
            if self.opts['cache_jobs']:
                # Local job cache has been enabled
                # NOTE(review): unlike _return_pub, there is no
                # `jid == 'req'` regeneration here — confirm whether 'req'
                # jids can reach this path.
                salt.utils.minion.cache_jobs(self.opts, load['jid'], ret)

        load = {'cmd': ret_cmd,
                'load': list(six.itervalues(jids))}

        def timeout_handler(*_):
            # NOTE: reports the last jid seen in the loop above
            log.warning(
               'The minion failed to return the job information for job %s. '
               'This is often due to the master being shut down or '
               'overloaded. If the master is running, consider increasing '
               'the worker_threads value.', jid
            )
            return True

        if sync:
            try:
                ret_val = self._send_req_sync(load, timeout=timeout)
            except SaltReqTimeoutError:
                timeout_handler()
                return ''
        else:
            with tornado.stack_context.ExceptionStackContext(timeout_handler):
                ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg

        log.trace('ret_val = %s', ret_val)  # pylint: disable=no-member
        return ret_val
key.startswith('__'): continue load['return'][key] = value else: load.update({'id': self.opts['id']}) for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled salt.utils.minion.cache_jobs(self.opts, load['jid'], ret) load = {'cmd': ret_cmd, 'load': list(six.itervalues(jids))} def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' 
) else: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data) def _refresh_grains_watcher(self, refresh_interval_in_minutes): ''' Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: :return: None ''' if '__update_grains' not in self.opts.get('schedule', {}): if 'schedule' not in self.opts: self.opts['schedule'] = {} self.opts['schedule'].update({ '__update_grains': { 'function': 'event.fire', 'args': [{}, 'grains_refresh'], 'minutes': refresh_interval_in_minutes } }) def _fire_master_minion_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to False in Sodium release. self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'minion_start' ) # send name spaced event self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'minion'), ) def module_refresh(self, force_refresh=False, notify=False): ''' Refresh the functions and returners. ''' log.debug('Refreshing modules. Notify=%s', notify) self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify) self.schedule.functions = self.functions self.schedule.returners = self.returners def beacons_refresh(self): ''' Refresh the functions and returners. 
    def matchers_refresh(self):
        '''
        Refresh the matchers
        '''
        log.debug('Refreshing matchers.')
        self.matchers = salt.loader.matchers(self.opts)

    # TODO: only allow one future in flight at a time?
    @tornado.gen.coroutine
    def pillar_refresh(self, force_refresh=False, notify=False):
        '''
        Refresh the pillar

        Re-compiles pillar from the master (when connected), then refreshes
        modules, matchers and beacons so they pick up the new data.
        '''
        if self.connected:
            log.debug('Refreshing pillar. Notify: %s', notify)
            async_pillar = salt.pillar.get_async_pillar(
                self.opts,
                self.opts['grains'],
                self.opts['id'],
                self.opts['saltenv'],
                pillarenv=self.opts.get('pillarenv'),
            )
            try:
                self.opts['pillar'] = yield async_pillar.compile_pillar()
                if notify:
                    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
                    evt.fire_event({'complete': True},
                                   tag=salt.defaults.events.MINION_PILLAR_COMPLETE)
            except SaltClientError:
                # Do not exit if a pillar refresh fails.
                log.error('Pillar data could not be refreshed. '
                          'One or more masters may be down!')
            finally:
                async_pillar.destroy()
        # refresh dependents even if we are not connected / refresh failed
        self.module_refresh(force_refresh, notify)
        self.matchers_refresh()
        self.beacons_refresh()

    def manage_schedule(self, tag, data):
        '''
        Refresh the functions and returners.

        Dispatches a schedule-management request (add/delete/modify/...)
        to the matching method on ``self.schedule``.
        '''
        func = data.get('func', None)
        name = data.get('name', None)
        schedule = data.get('schedule', None)
        where = data.get('where', None)
        persist = data.get('persist', None)

        funcs = {'delete': ('delete_job', (name, persist)),
                 'add': ('add_job', (schedule, persist)),
                 'modify': ('modify_job', (name, schedule, persist)),
                 'enable': ('enable_schedule', ()),
                 'disable': ('disable_schedule', ()),
                 'enable_job': ('enable_job', (name, persist)),
                 'disable_job': ('disable_job', (name, persist)),
                 'postpone_job': ('postpone_job', (name, data)),
                 'skip_job': ('skip_job', (name, data)),
                 'reload': ('reload', (schedule,)),
                 'list': ('list', (where,)),
                 'save_schedule': ('save_schedule', ()),
                 'get_next_fire_time': ('get_next_fire_time', (name,))}

        # Call the appropriate schedule function
        try:
            # funcs.get(func) is None for an unknown func; unpacking None
            # raises TypeError, which is reported below
            alias, params = funcs.get(func)
            getattr(self.schedule, alias)(*params)
        except TypeError:
            log.error('Function "%s" is unavailable in salt.utils.scheduler', func)
''' func = data.get('func', None) name = data.get('name', None) schedule = data.get('schedule', None) where = data.get('where', None) persist = data.get('persist', None) funcs = {'delete': ('delete_job', (name, persist)), 'add': ('add_job', (schedule, persist)), 'modify': ('modify_job', (name, schedule, persist)), 'enable': ('enable_schedule', ()), 'disable': ('disable_schedule', ()), 'enable_job': ('enable_job', (name, persist)), 'disable_job': ('disable_job', (name, persist)), 'postpone_job': ('postpone_job', (name, data)), 'skip_job': ('skip_job', (name, data)), 'reload': ('reload', (schedule,)), 'list': ('list', (where,)), 'save_schedule': ('save_schedule', ()), 'get_next_fire_time': ('get_next_fire_time', (name,))} # Call the appropriate schedule function try: alias, params = funcs.get(func) getattr(self.schedule, alias)(*params) except TypeError: log.error('Function "%s" is unavailable in salt.utils.scheduler', func) def manage_beacons(self, tag, data): ''' Manage Beacons ''' func = data.get('func', None) name = data.get('name', None) beacon_data = data.get('beacon_data', None) include_pillar = data.get('include_pillar', None) include_opts = data.get('include_opts', None) funcs = {'add': ('add_beacon', (name, beacon_data)), 'modify': ('modify_beacon', (name, beacon_data)), 'delete': ('delete_beacon', (name,)), 'enable': ('enable_beacons', ()), 'disable': ('disable_beacons', ()), 'enable_beacon': ('enable_beacon', (name,)), 'disable_beacon': ('disable_beacon', (name,)), 'list': ('list_beacons', (include_opts, include_pillar)), 'list_available': ('list_available_beacons', ()), 'validate_beacon': ('validate_beacon', (name, beacon_data)), 'reset': ('reset', ())} # Call the appropriate beacon function try: alias, params = funcs.get(func) getattr(self.beacons, alias)(*params) except AttributeError: log.error('Function "%s" is unavailable in salt.beacons', func) except TypeError as exc: log.info( 'Failed to handle %s with data(%s). 
Error: %s', tag, data, exc, exc_info_on_loglevel=logging.DEBUG ) def environ_setenv(self, tag, data): ''' Set the salt-minion main process environment according to the data contained in the minion event data ''' environ = data.get('environ', None) if environ is None: return False false_unsets = data.get('false_unsets', False) clear_all = data.get('clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all) def _pre_tune(self): ''' Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running ''' if self._running is None: self._running = True elif self._running is False: log.error( 'This %s was scheduled to stop. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return elif self._running is True: log.error( 'This %s is already running. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return try: log.info( '%s is starting as user \'%s\'', self.__class__.__name__, salt.utils.user.get_user() ) except Exception as err: # Only windows is allowed to fail here. See #3189. Log as debug in # that case. Else, error. 
log.log( salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR, 'Failed to get the user who is starting %s', self.__class__.__name__, exc_info=err ) def _mine_send(self, tag, data): ''' Send mine data to the master ''' channel = salt.transport.client.ReqChannel.factory(self.opts) data['tok'] = self.tok try: ret = channel.send(data) return ret except SaltReqTimeoutError: log.warning('Unable to send mine data to master.') return None finally: channel.close() def _handle_tag_module_refresh(self, tag, data): ''' Handle a module_refresh event ''' self.module_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) @tornado.gen.coroutine def _handle_tag_pillar_refresh(self, tag, data): ''' Handle a pillar_refresh event ''' yield self.pillar_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) def _handle_tag_beacons_refresh(self, tag, data): ''' Handle a beacon_refresh event ''' self.beacons_refresh() def _handle_tag_matchers_refresh(self, tag, data): ''' Handle a matchers_refresh event ''' self.matchers_refresh() def _handle_tag_manage_schedule(self, tag, data): ''' Handle a manage_schedule event ''' self.manage_schedule(tag, data) def _handle_tag_manage_beacons(self, tag, data): ''' Handle a manage_beacons event ''' self.manage_beacons(tag, data) def _handle_tag_grains_refresh(self, tag, data): ''' Handle a grains_refresh event ''' if (data.get('force_refresh', False) or self.grains_cache != self.opts['grains']): self.pillar_refresh(force_refresh=True) self.grains_cache = self.opts['grains'] def _handle_tag_environ_setenv(self, tag, data): ''' Handle a environ_setenv event ''' self.environ_setenv(tag, data) def _handle_tag_minion_mine(self, tag, data): ''' Handle a _minion_mine event ''' self._mine_send(tag, data) def _handle_tag_fire_master(self, tag, data): ''' Handle a fire_master event ''' if self.connected: log.debug('Forwarding master event tag=%s', data['tag']) 
self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) def _handle_tag_master_disconnected_failback(self, tag, data): ''' Handle a master_disconnected_failback event ''' # if the master disconnect event is for a different master, raise an exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 
'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = 
True self.io_loop.stop() def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) def _handle_tag_salt_auth_creds(self, tag, data): ''' Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def handle_event(self, package): 
''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. ''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion._return_pub_multi
python
def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True):
    '''
    Return the data from one or more executed commands to the master server.

    :param rets: a single job-return dict or a list of them; returns that
        share a jid are merged into a single payload entry.
    :param ret_cmd: master-side command to invoke ('_return' or
        '_syndic_return' — the latter builds a nested ``return`` mapping).
    :param timeout: seconds to wait for the master to acknowledge.
    :param sync: when True send over the synchronous req channel and swallow
        timeouts (returning ''); otherwise fire asynchronously and let
        ``timeout_handler`` log any failure.
    :return: the master's reply, or '' on a synchronous timeout.
    '''
    if not isinstance(rets, list):
        rets = [rets]
    jids = {}

    for ret in rets:
        # Syndic-forwarded returns use the dunder key variants.
        jid = ret.get('jid', ret.get('__jid__'))
        fun = ret.get('fun', ret.get('__fun__'))
        if self.opts['multiprocessing']:
            # Remove the proc file for this job now that it has returned.
            fn_ = os.path.join(self.proc_dir, jid)
            if os.path.isfile(fn_):
                try:
                    os.remove(fn_)
                except (OSError, IOError):
                    # The file is gone already
                    pass
        log.info('Returning information for job: %s', jid)
        load = jids.setdefault(jid, {})
        if ret_cmd == '_syndic_return':
            if not load:
                load.update({'id': self.opts['id'],
                             'jid': jid,
                             'fun': fun,
                             'arg': ret.get('arg'),
                             'tgt': ret.get('tgt'),
                             'tgt_type': ret.get('tgt_type'),
                             'load': ret.get('__load__'),
                             'return': {}})
            if '__master_id__' in ret:
                load['master_id'] = ret['__master_id__']
            for key, value in six.iteritems(ret):
                if key.startswith('__'):
                    continue
                load['return'][key] = value
        else:
            load.update({'id': self.opts['id']})
            for key, value in six.iteritems(ret):
                load[key] = value
            if 'out' in ret:
                if isinstance(ret['out'], six.string_types):
                    load['out'] = ret['out']
                else:
                    log.error(
                        'Invalid outputter %s. This is likely a bug.',
                        ret['out']
                    )
            else:
                try:
                    oput = self.functions[fun].__outputter__
                except (KeyError, AttributeError, TypeError):
                    pass
                else:
                    if isinstance(oput, six.string_types):
                        load['out'] = oput
        if self.opts['cache_jobs']:
            # Local job cache has been enabled.
            # BUGFIX: use the jid resolved above rather than load['jid'];
            # when the return only carried '__jid__' the load dict built in
            # the branches above has no 'jid' key and would raise KeyError.
            salt.utils.minion.cache_jobs(self.opts, jid, ret)

    load = {'cmd': ret_cmd, 'load': list(six.itervalues(jids))}

    def timeout_handler(*_):
        # BUGFIX: report every jid in the batch instead of whatever value
        # the loop variable 'jid' last held (which was also a NameError
        # when rets was empty).
        log.warning(
            'The minion failed to return the job information for job %s. '
            'This is often due to the master being shut down or '
            'overloaded. If the master is running, consider increasing '
            'the worker_threads value.', ', '.join(map(str, jids))
        )
        return True

    if sync:
        try:
            ret_val = self._send_req_sync(load, timeout=timeout)
        except SaltReqTimeoutError:
            timeout_handler()
            return ''
    else:
        with tornado.stack_context.ExceptionStackContext(timeout_handler):
            ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg

    log.trace('ret_val = %s', ret_val)  # pylint: disable=no-member
    return ret_val
Return the data from the executed command to the master server
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2058-L2142
null
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, 
self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' 
mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! 
if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) executors = salt.loader.executors(opts, functions, proxy=proxy) if opt_in: self.opts = opts return functions, returners, errors, executors def _send_req_sync(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.ReqChannel.factory(self.opts) try: return channel.send(load, timeout=timeout) finally: channel.close() @tornado.gen.coroutine def _send_req_async(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.AsyncReqChannel.factory(self.opts) try: ret = yield channel.send(load, timeout=timeout) raise tornado.gen.Return(ret) finally: channel.close() def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. ''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return if sync: try: self._send_req_sync(load, timeout) except salt.exceptions.SaltReqTimeoutError: log.info('fire_master failed: master could not be contacted. 
Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return False except Exception: log.info('fire_master failed: %s', traceback.format_exc()) return False else: if timeout_handler is None: def handle_timeout(*_): log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return True timeout_handler = handle_timeout with tornado.stack_context.ExceptionStackContext(timeout_handler): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # Ensure payload is unicode. Disregard failure to decode binary blobs. if six.PY2: data = salt.utils.data.decode(data, keep=True) if 'user' in data: log.info( 'User %s Executing command %s with jid %s', data['user'], data['fun'], data['jid'] ) else: log.info( 'Executing command %s with jid %s', data['fun'], data['jid'] ) log.debug('Command details %s', data) # Don't duplicate jobs log.trace('Started JIDs: %s', self.jid_queue) if self.jid_queue is not None: if data['jid'] in self.jid_queue: return else: self.jid_queue.append(data['jid']) if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']: self.jid_queue.pop(0) if isinstance(data['fun'], six.string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners process_count_max = self.opts.get('process_count_max') process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs') if process_count_max > 0: process_count = len(salt.utils.minion.running(self.opts)) while 
process_count >= process_count_max: log.warning('Maximum number of processes (%s) reached while ' 'executing jid %s, waiting %s seconds...', process_count_max, data['jid'], process_count_max_sleep_secs) yield tornado.gen.sleep(process_count_max_sleep_secs) process_count = len(salt.utils.minion.running(self.opts)) # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. instance = self multiprocessing_enabled = self.opts.get('multiprocessing', True) if multiprocessing_enabled: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None with default_signals(signal.SIGINT, signal.SIGTERM): process = SignalHandlingMultiprocessingProcess( target=self._target, args=(instance, self.opts, data, self.connected) ) else: process = threading.Thread( target=self._target, args=(instance, self.opts, data, self.connected), name=data['jid'] ) if multiprocessing_enabled: with default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers process.start() else: process.start() # TODO: remove the windows specific check? 
if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading 
target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. 
Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
try: func_result = all(return_data.get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret['retcode'] = retcode ret['success'] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = 'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( 'A command in \'%s\' had a problem: %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( 'Problem executing \'%s\': %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing \'{0}\': {1}'.format( function_name, exc ) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format( function_name, exc, func.__doc__ or '' ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string(function_name) ret['return'] = docs else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = 
function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: \'{0}\''.format( minion_instance.function_errors[mod_name] ) ret['success'] = False ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. Ignoring.') if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) # Add default returners from minion config # Should have been coverted to comma-delimited string already if isinstance(opts.get('return'), six.string_types): if data['ret']: data['ret'] = ','.join((data['ret'], opts['return'])) else: data['ret'] = opts['return'] log.debug('minion return: %s', ret) # TODO: make a list? Seems odd to split it this late :/ if data['ret'] and isinstance(data['ret'], six.string_types): if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: returner_str = '{0}.returner'.format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string(returner_str) log.error( 'Returner %s could not be loaded: %s', returner_str, returner_err ) except Exception as exc: log.exception( 'The return failed for job %s: %s', data['jid'], exc ) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. 
''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID %s', sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) multifunc_ordered = opts.get('multifunc_ordered', False) num_funcs = len(data['fun']) if multifunc_ordered: ret = { 'return': [None] * num_funcs, 'retcode': [None] * num_funcs, 'success': [False] * num_funcs } else: ret = { 'return': {}, 'retcode': {}, 'success': {} } for ind in range(0, num_funcs): if not multifunc_ordered: ret['success'][data['fun'][ind]] = False try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True elif minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. 
Only ' 'saltutil.refresh_pillar allowed in blackout mode.') func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) minion_instance.functions.pack['__context__']['retcode'] = 0 key = ind if multifunc_ordered else data['fun'][ind] ret['return'][key] = func(*args, **kwargs) retcode = minion_instance.functions.pack['__context__'].get( 'retcode', 0 ) if retcode == 0: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. try: func_result = all(ret['return'][key].get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = 1 ret['retcode'][key] = retcode ret['success'][key] = retcode == 0 except Exception as exc: trb = traceback.format_exc() log.warning('The minion function caused an exception: %s', exc) if multifunc_ordered: ret['return'][ind] = trb else: ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job %s: %s', data['jid'], exc ) def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file 
is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled if ret['jid'] == 'req': ret['jid'] = salt.utils.jid.gen_jid(self.opts) salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) if not self.opts['pub_ret']: return '' def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. 
If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' ) else: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data) def _refresh_grains_watcher(self, refresh_interval_in_minutes): ''' Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: :return: None ''' if '__update_grains' not in self.opts.get('schedule', {}): if 'schedule' not in self.opts: self.opts['schedule'] = {} self.opts['schedule'].update({ '__update_grains': { 'function': 'event.fire', 'args': [{}, 'grains_refresh'], 'minutes': refresh_interval_in_minutes } }) def _fire_master_minion_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to False in Sodium release. 
self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'minion_start' ) # send name spaced event self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'minion'), ) def module_refresh(self, force_refresh=False, notify=False): ''' Refresh the functions and returners. ''' log.debug('Refreshing modules. Notify=%s', notify) self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify) self.schedule.functions = self.functions self.schedule.returners = self.returners def beacons_refresh(self): ''' Refresh the functions and returners. ''' log.debug('Refreshing beacons.') self.beacons = salt.beacons.Beacon(self.opts, self.functions) def matchers_refresh(self): ''' Refresh the matchers ''' log.debug('Refreshing matchers.') self.matchers = salt.loader.matchers(self.opts) # TODO: only allow one future in flight at a time? @tornado.gen.coroutine def pillar_refresh(self, force_refresh=False, notify=False): ''' Refresh the pillar ''' if self.connected: log.debug('Refreshing pillar. Notify: %s', notify) async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ) try: self.opts['pillar'] = yield async_pillar.compile_pillar() if notify: evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE) except SaltClientError: # Do not exit if a pillar refresh fails. log.error('Pillar data could not be refreshed. ' 'One or more masters may be down!') finally: async_pillar.destroy() self.module_refresh(force_refresh, notify) self.matchers_refresh() self.beacons_refresh() def manage_schedule(self, tag, data): ''' Refresh the functions and returners. 
''' func = data.get('func', None) name = data.get('name', None) schedule = data.get('schedule', None) where = data.get('where', None) persist = data.get('persist', None) funcs = {'delete': ('delete_job', (name, persist)), 'add': ('add_job', (schedule, persist)), 'modify': ('modify_job', (name, schedule, persist)), 'enable': ('enable_schedule', ()), 'disable': ('disable_schedule', ()), 'enable_job': ('enable_job', (name, persist)), 'disable_job': ('disable_job', (name, persist)), 'postpone_job': ('postpone_job', (name, data)), 'skip_job': ('skip_job', (name, data)), 'reload': ('reload', (schedule,)), 'list': ('list', (where,)), 'save_schedule': ('save_schedule', ()), 'get_next_fire_time': ('get_next_fire_time', (name,))} # Call the appropriate schedule function try: alias, params = funcs.get(func) getattr(self.schedule, alias)(*params) except TypeError: log.error('Function "%s" is unavailable in salt.utils.scheduler', func) def manage_beacons(self, tag, data): ''' Manage Beacons ''' func = data.get('func', None) name = data.get('name', None) beacon_data = data.get('beacon_data', None) include_pillar = data.get('include_pillar', None) include_opts = data.get('include_opts', None) funcs = {'add': ('add_beacon', (name, beacon_data)), 'modify': ('modify_beacon', (name, beacon_data)), 'delete': ('delete_beacon', (name,)), 'enable': ('enable_beacons', ()), 'disable': ('disable_beacons', ()), 'enable_beacon': ('enable_beacon', (name,)), 'disable_beacon': ('disable_beacon', (name,)), 'list': ('list_beacons', (include_opts, include_pillar)), 'list_available': ('list_available_beacons', ()), 'validate_beacon': ('validate_beacon', (name, beacon_data)), 'reset': ('reset', ())} # Call the appropriate beacon function try: alias, params = funcs.get(func) getattr(self.beacons, alias)(*params) except AttributeError: log.error('Function "%s" is unavailable in salt.beacons', func) except TypeError as exc: log.info( 'Failed to handle %s with data(%s). 
Error: %s', tag, data, exc, exc_info_on_loglevel=logging.DEBUG ) def environ_setenv(self, tag, data): ''' Set the salt-minion main process environment according to the data contained in the minion event data ''' environ = data.get('environ', None) if environ is None: return False false_unsets = data.get('false_unsets', False) clear_all = data.get('clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all) def _pre_tune(self): ''' Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running ''' if self._running is None: self._running = True elif self._running is False: log.error( 'This %s was scheduled to stop. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return elif self._running is True: log.error( 'This %s is already running. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return try: log.info( '%s is starting as user \'%s\'', self.__class__.__name__, salt.utils.user.get_user() ) except Exception as err: # Only windows is allowed to fail here. See #3189. Log as debug in # that case. Else, error. 
        # (tail of _pre_tune) DEBUG on Windows, where this lookup is expected
        # to fail (see #3189); ERROR everywhere else.
            log.log(
                salt.utils.platform.is_windows() and
                logging.DEBUG or logging.ERROR,
                'Failed to get the user who is starting %s',
                self.__class__.__name__,
                exc_info=err
            )

    def _mine_send(self, tag, data):
        '''
        Send mine data to the master
        '''
        channel = salt.transport.client.ReqChannel.factory(self.opts)
        data['tok'] = self.tok
        try:
            ret = channel.send(data)
            return ret
        except SaltReqTimeoutError:
            # Best-effort: a mine push failure is logged, not raised.
            log.warning('Unable to send mine data to master.')
            return None
        finally:
            # Always release the channel, success or not.
            channel.close()

    def _handle_tag_module_refresh(self, tag, data):
        '''
        Handle a module_refresh event
        '''
        self.module_refresh(
            force_refresh=data.get('force_refresh', False),
            notify=data.get('notify', False)
        )

    @tornado.gen.coroutine
    def _handle_tag_pillar_refresh(self, tag, data):
        '''
        Handle a pillar_refresh event
        '''
        yield self.pillar_refresh(
            force_refresh=data.get('force_refresh', False),
            notify=data.get('notify', False)
        )

    def _handle_tag_beacons_refresh(self, tag, data):
        '''
        Handle a beacon_refresh event
        '''
        self.beacons_refresh()

    def _handle_tag_matchers_refresh(self, tag, data):
        '''
        Handle a matchers_refresh event
        '''
        self.matchers_refresh()

    def _handle_tag_manage_schedule(self, tag, data):
        '''
        Handle a manage_schedule event
        '''
        self.manage_schedule(tag, data)

    def _handle_tag_manage_beacons(self, tag, data):
        '''
        Handle a manage_beacons event
        '''
        self.manage_beacons(tag, data)

    def _handle_tag_grains_refresh(self, tag, data):
        '''
        Handle a grains_refresh event
        '''
        # Only refresh when forced or when the cached grains actually differ
        if (data.get('force_refresh', False) or
                self.grains_cache != self.opts['grains']):
            self.pillar_refresh(force_refresh=True)
            self.grains_cache = self.opts['grains']

    def _handle_tag_environ_setenv(self, tag, data):
        '''
        Handle a environ_setenv event
        '''
        self.environ_setenv(tag, data)

    def _handle_tag_minion_mine(self, tag, data):
        '''
        Handle a _minion_mine event
        '''
        self._mine_send(tag, data)

    def _handle_tag_fire_master(self, tag, data):
        '''
        Handle a fire_master event
        '''
        if self.connected:
            log.debug('Forwarding master event tag=%s', data['tag'])
self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) def _handle_tag_master_disconnected_failback(self, tag, data): ''' Handle a master_disconnected_failback event ''' # if the master disconnect event is for a different master, raise an exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 
'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = 
True self.io_loop.stop() def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) def _handle_tag_salt_auth_creds(self, tag, data): ''' Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def handle_event(self, package): 
''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. ''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion._state_run
python
def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' ) else: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data)
Execute a state run based on information set in the minion config file
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2144-L2167
null
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, 
self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' 
mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! 
if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) executors = salt.loader.executors(opts, functions, proxy=proxy) if opt_in: self.opts = opts return functions, returners, errors, executors def _send_req_sync(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.ReqChannel.factory(self.opts) try: return channel.send(load, timeout=timeout) finally: channel.close() @tornado.gen.coroutine def _send_req_async(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.AsyncReqChannel.factory(self.opts) try: ret = yield channel.send(load, timeout=timeout) raise tornado.gen.Return(ret) finally: channel.close() def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. ''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return if sync: try: self._send_req_sync(load, timeout) except salt.exceptions.SaltReqTimeoutError: log.info('fire_master failed: master could not be contacted. 
Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return False except Exception: log.info('fire_master failed: %s', traceback.format_exc()) return False else: if timeout_handler is None: def handle_timeout(*_): log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return True timeout_handler = handle_timeout with tornado.stack_context.ExceptionStackContext(timeout_handler): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # Ensure payload is unicode. Disregard failure to decode binary blobs. if six.PY2: data = salt.utils.data.decode(data, keep=True) if 'user' in data: log.info( 'User %s Executing command %s with jid %s', data['user'], data['fun'], data['jid'] ) else: log.info( 'Executing command %s with jid %s', data['fun'], data['jid'] ) log.debug('Command details %s', data) # Don't duplicate jobs log.trace('Started JIDs: %s', self.jid_queue) if self.jid_queue is not None: if data['jid'] in self.jid_queue: return else: self.jid_queue.append(data['jid']) if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']: self.jid_queue.pop(0) if isinstance(data['fun'], six.string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners process_count_max = self.opts.get('process_count_max') process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs') if process_count_max > 0: process_count = len(salt.utils.minion.running(self.opts)) while 
process_count >= process_count_max: log.warning('Maximum number of processes (%s) reached while ' 'executing jid %s, waiting %s seconds...', process_count_max, data['jid'], process_count_max_sleep_secs) yield tornado.gen.sleep(process_count_max_sleep_secs) process_count = len(salt.utils.minion.running(self.opts)) # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. instance = self multiprocessing_enabled = self.opts.get('multiprocessing', True) if multiprocessing_enabled: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None with default_signals(signal.SIGINT, signal.SIGTERM): process = SignalHandlingMultiprocessingProcess( target=self._target, args=(instance, self.opts, data, self.connected) ) else: process = threading.Thread( target=self._target, args=(instance, self.opts, data, self.connected), name=data['jid'] ) if multiprocessing_enabled: with default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers process.start() else: process.start() # TODO: remove the windows specific check? 
if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading 
target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. 
Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
try: func_result = all(return_data.get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret['retcode'] = retcode ret['success'] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = 'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( 'A command in \'%s\' had a problem: %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( 'Problem executing \'%s\': %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing \'{0}\': {1}'.format( function_name, exc ) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format( function_name, exc, func.__doc__ or '' ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string(function_name) ret['return'] = docs else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = 
function_name.split('.')[0]
            if mod_name in minion_instance.function_errors:
                ret['return'] += ' Possible reasons: \'{0}\''.format(
                    minion_instance.function_errors[mod_name]
                )
            ret['success'] = False
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            ret['out'] = 'nested'

        ret['jid'] = data['jid']
        ret['fun'] = data['fun']
        ret['fun_args'] = data['arg']
        if 'master_id' in data:
            ret['master_id'] = data['master_id']
        if 'metadata' in data:
            if isinstance(data['metadata'], dict):
                ret['metadata'] = data['metadata']
            else:
                log.warning('The metadata parameter must be a dictionary. Ignoring.')
        if minion_instance.connected:
            minion_instance._return_pub(
                ret,
                timeout=minion_instance._return_retry_timer()
            )

        # Add default returners from minion config
        # Should have been converted to comma-delimited string already
        if isinstance(opts.get('return'), six.string_types):
            if data['ret']:
                data['ret'] = ','.join((data['ret'], opts['return']))
            else:
                data['ret'] = opts['return']

        log.debug('minion return: %s', ret)
        # TODO: make a list? Seems odd to split it this late :/
        if data['ret'] and isinstance(data['ret'], six.string_types):
            if 'ret_config' in data:
                ret['ret_config'] = data['ret_config']
            if 'ret_kwargs' in data:
                ret['ret_kwargs'] = data['ret_kwargs']
            ret['id'] = opts['id']
            for returner in set(data['ret'].split(',')):
                try:
                    returner_str = '{0}.returner'.format(returner)
                    if returner_str in minion_instance.returners:
                        minion_instance.returners[returner_str](ret)
                    else:
                        returner_err = minion_instance.returners.missing_fun_string(returner_str)
                        log.error(
                            'Returner %s could not be loaded: %s',
                            returner_str, returner_err
                        )
                except Exception as exc:
                    log.exception(
                        'The return failed for job %s: %s', data['jid'], exc
                    )

    @classmethod
    def _thread_multi_return(cls, minion_instance, opts, data):
        '''
        This method should be used as a threading target, start the actual
        minion side execution.

        Like _thread_return, but ``data['fun']`` / ``data['arg']`` are
        parallel lists of functions to run; results are collected either
        positionally (``multifunc_ordered``) or keyed by function name.
        '''
        # Record this job's process data under proc_dir/<jid> so running
        # jobs can be discovered (e.g. by saltutil.running).
        fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
        if opts['multiprocessing'] and not salt.utils.platform.is_windows():
            # Shutdown the multiprocessing before daemonizing
            salt.log.setup.shutdown_multiprocessing_logging()

            salt.utils.process.daemonize_if(opts)

            # Reconfigure multiprocessing logging after daemonizing
            salt.log.setup.setup_multiprocessing_logging()

        salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))

        sdata = {'pid': os.getpid()}
        sdata.update(data)
        log.info('Starting a new job with PID %s', sdata['pid'])
        with salt.utils.files.fopen(fn_, 'w+b') as fp_:
            fp_.write(minion_instance.serial.dumps(sdata))

        multifunc_ordered = opts.get('multifunc_ordered', False)
        num_funcs = len(data['fun'])
        if multifunc_ordered:
            ret = {
                'return': [None] * num_funcs,
                'retcode': [None] * num_funcs,
                'success': [False] * num_funcs
            }
        else:
            ret = {
                'return': {},
                'retcode': {},
                'success': {}
            }

        for ind in range(0, num_funcs):
            if not multifunc_ordered:
                ret['success'][data['fun'][ind]] = False
            try:
                minion_blackout_violation = False
                if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
                    whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
                    # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                    if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
                        minion_blackout_violation = True
                elif minion_instance.opts['grains'].get('minion_blackout', False):
                    whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
                    if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
                        minion_blackout_violation = True
                if minion_blackout_violation:
                    raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                              'to False in pillar or grains to resume operations. Only '
                                              'saltutil.refresh_pillar allowed in blackout mode.')

                func = minion_instance.functions[data['fun'][ind]]

                args, kwargs = load_args_and_kwargs(
                    func,
                    data['arg'][ind],
                    data)
                minion_instance.functions.pack['__context__']['retcode'] = 0
                key = ind if multifunc_ordered else data['fun'][ind]
                ret['return'][key] = func(*args, **kwargs)
                retcode = minion_instance.functions.pack['__context__'].get(
                    'retcode',
                    0
                )
                if retcode == 0:
                    # No nonzero retcode in __context__ dunder. Check if return
                    # is a dictionary with a "result" or "success" key.
                    try:
                        func_result = all(ret['return'][key].get(x, True)
                                          for x in ('result', 'success'))
                    except Exception:
                        # return data is not a dict
                        func_result = True
                    if not func_result:
                        retcode = 1

                ret['retcode'][key] = retcode
                ret['success'][key] = retcode == 0
            except Exception as exc:
                trb = traceback.format_exc()
                log.warning('The minion function caused an exception: %s', exc)
                if multifunc_ordered:
                    ret['return'][ind] = trb
                else:
                    ret['return'][data['fun'][ind]] = trb
        ret['jid'] = data['jid']
        ret['fun'] = data['fun']
        ret['fun_args'] = data['arg']
        if 'metadata' in data:
            ret['metadata'] = data['metadata']
        if minion_instance.connected:
            minion_instance._return_pub(
                ret,
                timeout=minion_instance._return_retry_timer()
            )
        if data['ret']:
            if 'ret_config' in data:
                ret['ret_config'] = data['ret_config']
            if 'ret_kwargs' in data:
                ret['ret_kwargs'] = data['ret_kwargs']
            for returner in set(data['ret'].split(',')):
                ret['id'] = opts['id']
                try:
                    minion_instance.returners['{0}.returner'.format(
                        returner
                    )](ret)
                except Exception as exc:
                    log.error(
                        'The return failed for job %s: %s',
                        data['jid'], exc
                    )

    def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True):
        '''
        Return the data from the executed command to the master server
        '''
        jid = ret.get('jid', ret.get('__jid__'))
        fun = ret.get('fun', ret.get('__fun__'))
        if self.opts['multiprocessing']:
            # Remove the proc_dir entry for this finished job
            fn_ = os.path.join(self.proc_dir, jid)
            if os.path.isfile(fn_):
                try:
                    os.remove(fn_)
                except (OSError, IOError):
                    # The file is gone already
                    pass
        log.info('Returning information for job: %s', jid)
        log.trace('Return data: %s', ret)
        if ret_cmd == '_syndic_return':
            # NOTE(review): this uses opts['uid'] while the sibling
            # _return_pub_multi uses opts['id'] for the same field — confirm
            # which is intended.
            load = {'cmd': ret_cmd,
                    'id': self.opts['uid'],
                    'jid': jid,
                    'fun': fun,
                    'arg': ret.get('arg'),
                    'tgt': ret.get('tgt'),
                    'tgt_type': ret.get('tgt_type'),
                    'load': ret.get('__load__')}
            if '__master_id__' in ret:
                load['master_id'] = ret['__master_id__']
            load['return'] = {}
            for key, value in six.iteritems(ret):
                if key.startswith('__'):
                    continue
                load['return'][key] = value
        else:
            load = {'cmd': ret_cmd,
                    'id': self.opts['id']}
            for key, value in six.iteritems(ret):
                load[key] = value

        if 'out' in ret:
            if isinstance(ret['out'], six.string_types):
                load['out'] = ret['out']
            else:
                log.error(
                    'Invalid outputter %s. This is likely a bug.',
                    ret['out']
                )
        else:
            try:
                oput = self.functions[fun].__outputter__
            except (KeyError, AttributeError, TypeError):
                pass
            else:
                if isinstance(oput, six.string_types):
                    load['out'] = oput
        if self.opts['cache_jobs']:
            # Local job cache has been enabled
            if ret['jid'] == 'req':
                ret['jid'] = salt.utils.jid.gen_jid(self.opts)
            salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret)

        if not self.opts['pub_ret']:
            return ''

        def timeout_handler(*_):
            log.warning(
                'The minion failed to return the job information for job %s. '
                'This is often due to the master being shut down or '
                'overloaded. If the master is running, consider increasing '
                'the worker_threads value.', jid
            )
            return True

        if sync:
            try:
                ret_val = self._send_req_sync(load, timeout=timeout)
            except SaltReqTimeoutError:
                timeout_handler()
                return ''
        else:
            with tornado.stack_context.ExceptionStackContext(timeout_handler):
                ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg

        log.trace('ret_val = %s', ret_val)  # pylint: disable=no-member
        return ret_val

    def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True):
        '''
        Return the data from the executed command to the master server

        Batches multiple return dicts (grouped by jid) into a single
        master request.
        '''
        if not isinstance(rets, list):
            rets = [rets]
        jids = {}
        for ret in rets:
            jid = ret.get('jid', ret.get('__jid__'))
            fun = ret.get('fun', ret.get('__fun__'))
            if self.opts['multiprocessing']:
                fn_ = os.path.join(self.proc_dir, jid)
                if os.path.isfile(fn_):
                    try:
                        os.remove(fn_)
                    except (OSError, IOError):
                        # The file is gone already
                        pass
            log.info('Returning information for job: %s', jid)
            load = jids.setdefault(jid, {})
            if ret_cmd == '_syndic_return':
                if not load:
                    load.update({'id': self.opts['id'],
                                 'jid': jid,
                                 'fun': fun,
                                 'arg': ret.get('arg'),
                                 'tgt': ret.get('tgt'),
                                 'tgt_type': ret.get('tgt_type'),
                                 'load': ret.get('__load__'),
                                 'return': {}})
                if '__master_id__' in ret:
                    load['master_id'] = ret['__master_id__']
                for key, value in six.iteritems(ret):
                    if key.startswith('__'):
                        continue
                    load['return'][key] = value
            else:
                load.update({'id': self.opts['id']})
                for key, value in six.iteritems(ret):
                    load[key] = value

            if 'out' in ret:
                if isinstance(ret['out'], six.string_types):
                    load['out'] = ret['out']
                else:
                    log.error(
                        'Invalid outputter %s. This is likely a bug.',
                        ret['out']
                    )
            else:
                try:
                    oput = self.functions[fun].__outputter__
                except (KeyError, AttributeError, TypeError):
                    pass
                else:
                    if isinstance(oput, six.string_types):
                        load['out'] = oput
            if self.opts['cache_jobs']:
                # Local job cache has been enabled
                salt.utils.minion.cache_jobs(self.opts, load['jid'], ret)

        load = {'cmd': ret_cmd,
                'load': list(six.itervalues(jids))}

        def timeout_handler(*_):
            # NOTE(review): `jid` here is whatever the loop above bound last,
            # so the warning only names one of the batched jobs.
            log.warning(
                'The minion failed to return the job information for job %s. '
                'This is often due to the master being shut down or '
                'overloaded. If the master is running, consider increasing '
                'the worker_threads value.', jid
            )
            return True

        if sync:
            try:
                ret_val = self._send_req_sync(load, timeout=timeout)
            except SaltReqTimeoutError:
                timeout_handler()
                return ''
        else:
            with tornado.stack_context.ExceptionStackContext(timeout_handler):
                ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg

        log.trace('ret_val = %s', ret_val)  # pylint: disable=no-member
        return ret_val

    def _refresh_grains_watcher(self, refresh_interval_in_minutes):
        '''
        Create a loop that will fire a pillar refresh to inform a master about
        a change in the grains of this minion

        :param refresh_interval_in_minutes: schedule period for the
            ``event.fire`` job that emits the ``grains_refresh`` event
        :return: None
        '''
        if '__update_grains' not in self.opts.get('schedule', {}):
            if 'schedule' not in self.opts:
                self.opts['schedule'] = {}
            self.opts['schedule'].update({
                '__update_grains': {
                    'function': 'event.fire',
                    'args': [{}, 'grains_refresh'],
                    'minutes': refresh_interval_in_minutes
                }
            })

    def _fire_master_minion_start(self):
        # Send an event to the master that the minion is live
        if self.opts['enable_legacy_startup_events']:
            # Old style event. Defaults to False in Sodium release.
self._fire_master(
                'Minion {0} started at {1}'.format(
                    self.opts['id'],
                    time.asctime()
                ),
                'minion_start'
            )
        # send name spaced event
        self._fire_master(
            'Minion {0} started at {1}'.format(
                self.opts['id'],
                time.asctime()
            ),
            tagify([self.opts['id'], 'start'], 'minion'),
        )

    def module_refresh(self, force_refresh=False, notify=False):
        '''
        Refresh the functions and returners.
        '''
        log.debug('Refreshing modules. Notify=%s', notify)
        self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify)
        # Hand the freshly loaded functions/returners to the scheduler too
        self.schedule.functions = self.functions
        self.schedule.returners = self.returners

    def beacons_refresh(self):
        '''
        Refresh the beacons.
        '''
        log.debug('Refreshing beacons.')
        self.beacons = salt.beacons.Beacon(self.opts, self.functions)

    def matchers_refresh(self):
        '''
        Refresh the matchers
        '''
        log.debug('Refreshing matchers.')
        self.matchers = salt.loader.matchers(self.opts)

    # TODO: only allow one future in flight at a time?
    @tornado.gen.coroutine
    def pillar_refresh(self, force_refresh=False, notify=False):
        '''
        Refresh the pillar
        '''
        if self.connected:
            log.debug('Refreshing pillar. Notify: %s', notify)
            async_pillar = salt.pillar.get_async_pillar(
                self.opts,
                self.opts['grains'],
                self.opts['id'],
                self.opts['saltenv'],
                pillarenv=self.opts.get('pillarenv'),
            )
            try:
                self.opts['pillar'] = yield async_pillar.compile_pillar()
                if notify:
                    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
                    evt.fire_event({'complete': True},
                                   tag=salt.defaults.events.MINION_PILLAR_COMPLETE)
            except SaltClientError:
                # Do not exit if a pillar refresh fails.
                log.error('Pillar data could not be refreshed. '
                          'One or more masters may be down!')
            finally:
                async_pillar.destroy()
        # Modules/matchers/beacons are refreshed even when disconnected
        self.module_refresh(force_refresh, notify)
        self.matchers_refresh()
        self.beacons_refresh()

    def manage_schedule(self, tag, data):
        '''
        Dispatch a manage_schedule event to the matching
        salt.utils.schedule.Schedule method.
        '''
        func = data.get('func', None)
        name = data.get('name', None)
        schedule = data.get('schedule', None)
        where = data.get('where', None)
        persist = data.get('persist', None)

        # Map event 'func' names to (Schedule method, args)
        funcs = {'delete': ('delete_job', (name, persist)),
                 'add': ('add_job', (schedule, persist)),
                 'modify': ('modify_job', (name, schedule, persist)),
                 'enable': ('enable_schedule', ()),
                 'disable': ('disable_schedule', ()),
                 'enable_job': ('enable_job', (name, persist)),
                 'disable_job': ('disable_job', (name, persist)),
                 'postpone_job': ('postpone_job', (name, data)),
                 'skip_job': ('skip_job', (name, data)),
                 'reload': ('reload', (schedule,)),
                 'list': ('list', (where,)),
                 'save_schedule': ('save_schedule', ()),
                 'get_next_fire_time': ('get_next_fire_time', (name,))}

        # Call the appropriate schedule function
        try:
            # funcs.get(func) is None for unknown funcs; unpacking then
            # raises TypeError, which is reported below
            alias, params = funcs.get(func)
            getattr(self.schedule, alias)(*params)
        except TypeError:
            log.error('Function "%s" is unavailable in salt.utils.scheduler', func)

    def manage_beacons(self, tag, data):
        '''
        Manage Beacons
        '''
        func = data.get('func', None)
        name = data.get('name', None)
        beacon_data = data.get('beacon_data', None)
        include_pillar = data.get('include_pillar', None)
        include_opts = data.get('include_opts', None)

        # Map event 'func' names to (Beacon method, args)
        funcs = {'add': ('add_beacon', (name, beacon_data)),
                 'modify': ('modify_beacon', (name, beacon_data)),
                 'delete': ('delete_beacon', (name,)),
                 'enable': ('enable_beacons', ()),
                 'disable': ('disable_beacons', ()),
                 'enable_beacon': ('enable_beacon', (name,)),
                 'disable_beacon': ('disable_beacon', (name,)),
                 'list': ('list_beacons', (include_opts, include_pillar)),
                 'list_available': ('list_available_beacons', ()),
                 'validate_beacon': ('validate_beacon', (name, beacon_data)),
                 'reset': ('reset', ())}

        # Call the appropriate beacon function
        try:
            alias, params = funcs.get(func)
            getattr(self.beacons, alias)(*params)
        except AttributeError:
            log.error('Function "%s" is unavailable in salt.beacons', func)
        except TypeError as exc:
            log.info(
                'Failed to handle %s with data(%s). Error: %s',
                tag, data, exc,
                exc_info_on_loglevel=logging.DEBUG
            )

    def environ_setenv(self, tag, data):
        '''
        Set the salt-minion main process environment according to
        the data contained in the minion event data
        '''
        environ = data.get('environ', None)
        if environ is None:
            return False
        false_unsets = data.get('false_unsets', False)
        clear_all = data.get('clear_all', False)
        import salt.modules.environ as mod_environ
        return mod_environ.setenv(environ, false_unsets, clear_all)

    def _pre_tune(self):
        '''
        Set the minion running flag and issue the appropriate warnings if
        the minion cannot be started or is already running
        '''
        if self._running is None:
            self._running = True
        elif self._running is False:
            log.error(
                'This %s was scheduled to stop. Not running %s.tune_in()',
                self.__class__.__name__, self.__class__.__name__
            )
            return
        elif self._running is True:
            log.error(
                'This %s is already running. Not running %s.tune_in()',
                self.__class__.__name__, self.__class__.__name__
            )
            return

        try:
            log.info(
                '%s is starting as user \'%s\'',
                self.__class__.__name__, salt.utils.user.get_user()
            )
        except Exception as err:
            # Only windows is allowed to fail here. See #3189. Log as debug in
            # that case. Else, error.
log.log(
                salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR,
                'Failed to get the user who is starting %s',
                self.__class__.__name__,
                exc_info=err
            )

    def _mine_send(self, tag, data):
        '''
        Send mine data to the master
        '''
        channel = salt.transport.client.ReqChannel.factory(self.opts)
        data['tok'] = self.tok
        try:
            ret = channel.send(data)
            return ret
        except SaltReqTimeoutError:
            log.warning('Unable to send mine data to master.')
            return None
        finally:
            # Always release the request channel
            channel.close()

    def _handle_tag_module_refresh(self, tag, data):
        '''
        Handle a module_refresh event
        '''
        self.module_refresh(
            force_refresh=data.get('force_refresh', False),
            notify=data.get('notify', False)
        )

    @tornado.gen.coroutine
    def _handle_tag_pillar_refresh(self, tag, data):
        '''
        Handle a pillar_refresh event
        '''
        yield self.pillar_refresh(
            force_refresh=data.get('force_refresh', False),
            notify=data.get('notify', False)
        )

    def _handle_tag_beacons_refresh(self, tag, data):
        '''
        Handle a beacon_refresh event
        '''
        self.beacons_refresh()

    def _handle_tag_matchers_refresh(self, tag, data):
        '''
        Handle a matchers_refresh event
        '''
        self.matchers_refresh()

    def _handle_tag_manage_schedule(self, tag, data):
        '''
        Handle a manage_schedule event
        '''
        self.manage_schedule(tag, data)

    def _handle_tag_manage_beacons(self, tag, data):
        '''
        Handle a manage_beacons event
        '''
        self.manage_beacons(tag, data)

    def _handle_tag_grains_refresh(self, tag, data):
        '''
        Handle a grains_refresh event
        '''
        if (data.get('force_refresh', False) or
                self.grains_cache != self.opts['grains']):
            # NOTE(review): pillar_refresh is a coroutine but is called here
            # without yield — the returned future is discarded. Confirm this
            # fire-and-forget behavior is intended.
            self.pillar_refresh(force_refresh=True)
            self.grains_cache = self.opts['grains']

    def _handle_tag_environ_setenv(self, tag, data):
        '''
        Handle a environ_setenv event
        '''
        self.environ_setenv(tag, data)

    def _handle_tag_minion_mine(self, tag, data):
        '''
        Handle a _minion_mine event
        '''
        self._mine_send(tag, data)

    def _handle_tag_fire_master(self, tag, data):
        '''
        Handle a fire_master event
        '''
        if self.connected:
            log.debug('Forwarding master event tag=%s', data['tag'])
            self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])

    def _handle_tag_master_disconnected_failback(self, tag, data):
        '''
        Handle a master_disconnected_failback event
        '''
        # if the master disconnect event is for a different master, raise an exception
        if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']:
            # not mine master, ignore
            return
        if tag.startswith(master_event(type='failback')):
            # if the master failback event is not for the top master, raise an exception
            if data['master'] != self.opts['master_list'][0]:
                raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format(
                    data['master'], self.opts['master']))
            # if the master failback event is for the current master, raise an exception
            elif data['master'] == self.opts['master'][0]:
                raise SaltException('Already connected to \'{0}\''.format(data['master']))

        if self.connected:
            # we are not connected anymore
            self.connected = False
            log.info('Connection to master %s lost', self.opts['master'])

            # we can't use the config default here because the default '0' value is overloaded
            # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up
            # these jobs
            master_alive_interval = self.opts['master_alive_interval'] or 60

            if self.opts['master_type'] != 'failover':
                # modify the scheduled job to fire on reconnect
                if self.opts['transport'] != 'tcp':
                    schedule = {
                        'function': 'status.master',
                        'seconds': master_alive_interval,
                        'jid_include': True,
                        'maxrunning': 1,
                        'return_job': False,
                        'kwargs': {'master': self.opts['master'],
                                   'connected': False}
                    }
                    self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
                                             schedule=schedule)
            else:
                # delete the scheduled job to don't interfere with the failover process
                if self.opts['transport'] != 'tcp':
                    self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']),
                                             persist=True)

                log.info('Trying to tune in to next master from master-list')

                if hasattr(self,
'pub_channel'):
                    # Detach and tear down the old publish channel before
                    # evaluating the next master
                    self.pub_channel.on_recv(None)
                    if hasattr(self.pub_channel, 'auth'):
                        self.pub_channel.auth.invalidate()
                    if hasattr(self.pub_channel, 'close'):
                        self.pub_channel.close()
                    del self.pub_channel

            # if eval_master finds a new master for us, self.connected
            # will be True again on successful master authentication
            try:
                master, self.pub_channel = yield self.eval_master(
                    opts=self.opts,
                    failed=True,
                    failback=tag.startswith(master_event(type='failback')))
            except SaltClientError:
                pass

            if self.connected:
                self.opts['master'] = master

                # re-init the subsystems to work with the new master
                log.info(
                    'Re-initialising subsystems for new master %s',
                    self.opts['master']
                )

                # put the current schedule into the new loaders
                self.opts['schedule'] = self.schedule.option('schedule')
                self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
                # make the schedule to use the new 'functions' loader
                self.schedule.functions = self.functions
                self.pub_channel.on_recv(self._handle_payload)
                self._fire_master_minion_start()
                log.info('Minion is ready to receive requests!')

                # update scheduled job to run with the new master addr
                if self.opts['transport'] != 'tcp':
                    schedule = {
                        'function': 'status.master',
                        'seconds': master_alive_interval,
                        'jid_include': True,
                        'maxrunning': 1,
                        'return_job': False,
                        'kwargs': {'master': self.opts['master'],
                                   'connected': True}
                    }
                    self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
                                             schedule=schedule)

                    if self.opts['master_failback'] and 'master_list' in self.opts:
                        if self.opts['master'] != self.opts['master_list'][0]:
                            schedule = {
                                'function': 'status.ping_master',
                                'seconds': self.opts['master_failback_interval'],
                                'jid_include': True,
                                'maxrunning': 1,
                                'return_job': False,
                                'kwargs': {'master': self.opts['master_list'][0]}
                            }
                            self.schedule.modify_job(name=master_event(type='failback'),
                                                     schedule=schedule)
                        else:
                            self.schedule.delete_job(name=master_event(type='failback'), persist=True)
            else:
                # No master could be reached; stop the loop so the caller can
                # restart the minion
                self.restart = True
                self.io_loop.stop()

    def _handle_tag_master_connected(self, tag, data):
        '''
        Handle a master_connected event
        '''
        # handle this event only once. otherwise it will pollute the log
        # also if master type is failover all the reconnection work is done
        # by `disconnected` event handler and this event must never happen,
        # anyway check it to be sure
        if not self.connected and self.opts['master_type'] != 'failover':
            log.info('Connection to master %s re-established', self.opts['master'])
            self.connected = True
            # modify the __master_alive job to only fire,
            # if the connection is lost again
            if self.opts['transport'] != 'tcp':
                if self.opts['master_alive_interval'] > 0:
                    schedule = {
                        'function': 'status.master',
                        'seconds': self.opts['master_alive_interval'],
                        'jid_include': True,
                        'maxrunning': 1,
                        'return_job': False,
                        'kwargs': {'master': self.opts['master'],
                                   'connected': True}
                    }
                    self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
                                             schedule=schedule)
                else:
                    self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']),
                                             persist=True)

    def _handle_tag_schedule_return(self, tag, data):
        '''
        Handle a _schedule_return event
        '''
        # reporting current connection with master
        if data['schedule'].startswith(master_event(type='alive', master='')):
            if data['return']:
                log.debug(
                    'Connected to master %s',
                    data['schedule'].split(master_event(type='alive', master=''))[1]
                )
        self._return_pub(data, ret_cmd='_return', sync=False)

    def _handle_tag_salt_error(self, tag, data):
        '''
        Handle a _salt_error event
        '''
        if self.connected:
            log.debug('Forwarding salt error event tag=%s', tag)
            self._fire_master(data, tag)

    def _handle_tag_salt_auth_creds(self, tag, data):
        '''
        Handle a salt_auth_creds event
        '''
        key = tuple(data['key'])
        log.debug(
            'Updating auth data for %s: %s -> %s',
            key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds']
        )
        salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds']

    @tornado.gen.coroutine
    def handle_event(self, package):
''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. ''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion._refresh_grains_watcher
python
def _refresh_grains_watcher(self, refresh_interval_in_minutes): ''' Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: :return: None ''' if '__update_grains' not in self.opts.get('schedule', {}): if 'schedule' not in self.opts: self.opts['schedule'] = {} self.opts['schedule'].update({ '__update_grains': { 'function': 'event.fire', 'args': [{}, 'grains_refresh'], 'minutes': refresh_interval_in_minutes } })
Create a scheduled job that periodically fires a grains_refresh event to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: interval in minutes between refresh events :return: None
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2169-L2185
null
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, 
self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' 
mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! 
if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) executors = salt.loader.executors(opts, functions, proxy=proxy) if opt_in: self.opts = opts return functions, returners, errors, executors def _send_req_sync(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.ReqChannel.factory(self.opts) try: return channel.send(load, timeout=timeout) finally: channel.close() @tornado.gen.coroutine def _send_req_async(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.AsyncReqChannel.factory(self.opts) try: ret = yield channel.send(load, timeout=timeout) raise tornado.gen.Return(ret) finally: channel.close() def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. ''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return if sync: try: self._send_req_sync(load, timeout) except salt.exceptions.SaltReqTimeoutError: log.info('fire_master failed: master could not be contacted. 
Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return False except Exception: log.info('fire_master failed: %s', traceback.format_exc()) return False else: if timeout_handler is None: def handle_timeout(*_): log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return True timeout_handler = handle_timeout with tornado.stack_context.ExceptionStackContext(timeout_handler): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # Ensure payload is unicode. Disregard failure to decode binary blobs. if six.PY2: data = salt.utils.data.decode(data, keep=True) if 'user' in data: log.info( 'User %s Executing command %s with jid %s', data['user'], data['fun'], data['jid'] ) else: log.info( 'Executing command %s with jid %s', data['fun'], data['jid'] ) log.debug('Command details %s', data) # Don't duplicate jobs log.trace('Started JIDs: %s', self.jid_queue) if self.jid_queue is not None: if data['jid'] in self.jid_queue: return else: self.jid_queue.append(data['jid']) if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']: self.jid_queue.pop(0) if isinstance(data['fun'], six.string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners process_count_max = self.opts.get('process_count_max') process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs') if process_count_max > 0: process_count = len(salt.utils.minion.running(self.opts)) while 
process_count >= process_count_max: log.warning('Maximum number of processes (%s) reached while ' 'executing jid %s, waiting %s seconds...', process_count_max, data['jid'], process_count_max_sleep_secs) yield tornado.gen.sleep(process_count_max_sleep_secs) process_count = len(salt.utils.minion.running(self.opts)) # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. instance = self multiprocessing_enabled = self.opts.get('multiprocessing', True) if multiprocessing_enabled: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None with default_signals(signal.SIGINT, signal.SIGTERM): process = SignalHandlingMultiprocessingProcess( target=self._target, args=(instance, self.opts, data, self.connected) ) else: process = threading.Thread( target=self._target, args=(instance, self.opts, data, self.connected), name=data['jid'] ) if multiprocessing_enabled: with default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers process.start() else: process.start() # TODO: remove the windows specific check? 
if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading 
target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. 
Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
try: func_result = all(return_data.get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret['retcode'] = retcode ret['success'] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = 'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( 'A command in \'%s\' had a problem: %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( 'Problem executing \'%s\': %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing \'{0}\': {1}'.format( function_name, exc ) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format( function_name, exc, func.__doc__ or '' ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string(function_name) ret['return'] = docs else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = 
function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: \'{0}\''.format( minion_instance.function_errors[mod_name] ) ret['success'] = False ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. Ignoring.') if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) # Add default returners from minion config # Should have been coverted to comma-delimited string already if isinstance(opts.get('return'), six.string_types): if data['ret']: data['ret'] = ','.join((data['ret'], opts['return'])) else: data['ret'] = opts['return'] log.debug('minion return: %s', ret) # TODO: make a list? Seems odd to split it this late :/ if data['ret'] and isinstance(data['ret'], six.string_types): if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: returner_str = '{0}.returner'.format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string(returner_str) log.error( 'Returner %s could not be loaded: %s', returner_str, returner_err ) except Exception as exc: log.exception( 'The return failed for job %s: %s', data['jid'], exc ) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. 
''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID %s', sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) multifunc_ordered = opts.get('multifunc_ordered', False) num_funcs = len(data['fun']) if multifunc_ordered: ret = { 'return': [None] * num_funcs, 'retcode': [None] * num_funcs, 'success': [False] * num_funcs } else: ret = { 'return': {}, 'retcode': {}, 'success': {} } for ind in range(0, num_funcs): if not multifunc_ordered: ret['success'][data['fun'][ind]] = False try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True elif minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. 
Only ' 'saltutil.refresh_pillar allowed in blackout mode.') func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) minion_instance.functions.pack['__context__']['retcode'] = 0 key = ind if multifunc_ordered else data['fun'][ind] ret['return'][key] = func(*args, **kwargs) retcode = minion_instance.functions.pack['__context__'].get( 'retcode', 0 ) if retcode == 0: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. try: func_result = all(ret['return'][key].get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = 1 ret['retcode'][key] = retcode ret['success'][key] = retcode == 0 except Exception as exc: trb = traceback.format_exc() log.warning('The minion function caused an exception: %s', exc) if multifunc_ordered: ret['return'][ind] = trb else: ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job %s: %s', data['jid'], exc ) def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file 
is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled if ret['jid'] == 'req': ret['jid'] = salt.utils.jid.gen_jid(self.opts) salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) if not self.opts['pub_ret']: return '' def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. 
If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' if not isinstance(rets, list): rets = [rets] jids = {} for ret in rets: jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) load = jids.setdefault(jid, {}) if ret_cmd == '_syndic_return': if not load: load.update({'id': self.opts['id'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__'), 'return': {}}) if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load.update({'id': self.opts['id']}) for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. 
This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled salt.utils.minion.cache_jobs(self.opts, load['jid'], ret) load = {'cmd': ret_cmd, 'load': list(six.itervalues(jids))} def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' ) else: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data) def _fire_master_minion_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to False in Sodium release. 
self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'minion_start' ) # send name spaced event self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'minion'), ) def module_refresh(self, force_refresh=False, notify=False): ''' Refresh the functions and returners. ''' log.debug('Refreshing modules. Notify=%s', notify) self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify) self.schedule.functions = self.functions self.schedule.returners = self.returners def beacons_refresh(self): ''' Refresh the functions and returners. ''' log.debug('Refreshing beacons.') self.beacons = salt.beacons.Beacon(self.opts, self.functions) def matchers_refresh(self): ''' Refresh the matchers ''' log.debug('Refreshing matchers.') self.matchers = salt.loader.matchers(self.opts) # TODO: only allow one future in flight at a time? @tornado.gen.coroutine def pillar_refresh(self, force_refresh=False, notify=False): ''' Refresh the pillar ''' if self.connected: log.debug('Refreshing pillar. Notify: %s', notify) async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ) try: self.opts['pillar'] = yield async_pillar.compile_pillar() if notify: evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE) except SaltClientError: # Do not exit if a pillar refresh fails. log.error('Pillar data could not be refreshed. ' 'One or more masters may be down!') finally: async_pillar.destroy() self.module_refresh(force_refresh, notify) self.matchers_refresh() self.beacons_refresh() def manage_schedule(self, tag, data): ''' Refresh the functions and returners. 
''' func = data.get('func', None) name = data.get('name', None) schedule = data.get('schedule', None) where = data.get('where', None) persist = data.get('persist', None) funcs = {'delete': ('delete_job', (name, persist)), 'add': ('add_job', (schedule, persist)), 'modify': ('modify_job', (name, schedule, persist)), 'enable': ('enable_schedule', ()), 'disable': ('disable_schedule', ()), 'enable_job': ('enable_job', (name, persist)), 'disable_job': ('disable_job', (name, persist)), 'postpone_job': ('postpone_job', (name, data)), 'skip_job': ('skip_job', (name, data)), 'reload': ('reload', (schedule,)), 'list': ('list', (where,)), 'save_schedule': ('save_schedule', ()), 'get_next_fire_time': ('get_next_fire_time', (name,))} # Call the appropriate schedule function try: alias, params = funcs.get(func) getattr(self.schedule, alias)(*params) except TypeError: log.error('Function "%s" is unavailable in salt.utils.scheduler', func) def manage_beacons(self, tag, data): ''' Manage Beacons ''' func = data.get('func', None) name = data.get('name', None) beacon_data = data.get('beacon_data', None) include_pillar = data.get('include_pillar', None) include_opts = data.get('include_opts', None) funcs = {'add': ('add_beacon', (name, beacon_data)), 'modify': ('modify_beacon', (name, beacon_data)), 'delete': ('delete_beacon', (name,)), 'enable': ('enable_beacons', ()), 'disable': ('disable_beacons', ()), 'enable_beacon': ('enable_beacon', (name,)), 'disable_beacon': ('disable_beacon', (name,)), 'list': ('list_beacons', (include_opts, include_pillar)), 'list_available': ('list_available_beacons', ()), 'validate_beacon': ('validate_beacon', (name, beacon_data)), 'reset': ('reset', ())} # Call the appropriate beacon function try: alias, params = funcs.get(func) getattr(self.beacons, alias)(*params) except AttributeError: log.error('Function "%s" is unavailable in salt.beacons', func) except TypeError as exc: log.info( 'Failed to handle %s with data(%s). 
Error: %s', tag, data, exc, exc_info_on_loglevel=logging.DEBUG ) def environ_setenv(self, tag, data): ''' Set the salt-minion main process environment according to the data contained in the minion event data ''' environ = data.get('environ', None) if environ is None: return False false_unsets = data.get('false_unsets', False) clear_all = data.get('clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all) def _pre_tune(self): ''' Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running ''' if self._running is None: self._running = True elif self._running is False: log.error( 'This %s was scheduled to stop. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return elif self._running is True: log.error( 'This %s is already running. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return try: log.info( '%s is starting as user \'%s\'', self.__class__.__name__, salt.utils.user.get_user() ) except Exception as err: # Only windows is allowed to fail here. See #3189. Log as debug in # that case. Else, error. 
log.log( salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR, 'Failed to get the user who is starting %s', self.__class__.__name__, exc_info=err ) def _mine_send(self, tag, data): ''' Send mine data to the master ''' channel = salt.transport.client.ReqChannel.factory(self.opts) data['tok'] = self.tok try: ret = channel.send(data) return ret except SaltReqTimeoutError: log.warning('Unable to send mine data to master.') return None finally: channel.close() def _handle_tag_module_refresh(self, tag, data): ''' Handle a module_refresh event ''' self.module_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) @tornado.gen.coroutine def _handle_tag_pillar_refresh(self, tag, data): ''' Handle a pillar_refresh event ''' yield self.pillar_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) def _handle_tag_beacons_refresh(self, tag, data): ''' Handle a beacon_refresh event ''' self.beacons_refresh() def _handle_tag_matchers_refresh(self, tag, data): ''' Handle a matchers_refresh event ''' self.matchers_refresh() def _handle_tag_manage_schedule(self, tag, data): ''' Handle a manage_schedule event ''' self.manage_schedule(tag, data) def _handle_tag_manage_beacons(self, tag, data): ''' Handle a manage_beacons event ''' self.manage_beacons(tag, data) def _handle_tag_grains_refresh(self, tag, data): ''' Handle a grains_refresh event ''' if (data.get('force_refresh', False) or self.grains_cache != self.opts['grains']): self.pillar_refresh(force_refresh=True) self.grains_cache = self.opts['grains'] def _handle_tag_environ_setenv(self, tag, data): ''' Handle a environ_setenv event ''' self.environ_setenv(tag, data) def _handle_tag_minion_mine(self, tag, data): ''' Handle a _minion_mine event ''' self._mine_send(tag, data) def _handle_tag_fire_master(self, tag, data): ''' Handle a fire_master event ''' if self.connected: log.debug('Forwarding master event tag=%s', data['tag']) 
self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) def _handle_tag_master_disconnected_failback(self, tag, data): ''' Handle a master_disconnected_failback event ''' # if the master disconnect event is for a different master, raise an exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 
'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = 
True self.io_loop.stop() def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) def _handle_tag_salt_auth_creds(self, tag, data): ''' Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def handle_event(self, package): 
''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. ''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion.module_refresh
python
def module_refresh(self, force_refresh=False, notify=False): ''' Refresh the functions and returners. ''' log.debug('Refreshing modules. Notify=%s', notify) self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify) self.schedule.functions = self.functions self.schedule.returners = self.returners
Refresh the functions and returners.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2207-L2215
[ "def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None):\n '''\n Return the functions and the returners loaded up from the loader\n module\n '''\n opt_in = True\n if not opts:\n opts = self.opts\n opt_in = False\n # if this is a *nix system AND modules_max_memory is set, lets enforce\n # a memory limit on module imports\n # this feature ONLY works on *nix like OSs (resource module doesn't work on windows)\n modules_max_memory = False\n if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:\n log.debug(\n 'modules_max_memory set, enforcing a maximum of %s',\n opts['modules_max_memory']\n )\n modules_max_memory = True\n old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)\n rss, vms = psutil.Process(os.getpid()).memory_info()[:2]\n mem_limit = rss + vms + opts['modules_max_memory']\n resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))\n elif opts.get('modules_max_memory', -1) > 0:\n if not HAS_PSUTIL:\n log.error('Unable to enforce modules_max_memory because psutil is missing')\n if not HAS_RESOURCE:\n log.error('Unable to enforce modules_max_memory because resource is missing')\n\n # This might be a proxy minion\n if hasattr(self, 'proxy'):\n proxy = self.proxy\n else:\n proxy = None\n\n if grains is None:\n opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy)\n self.utils = salt.loader.utils(opts, proxy=proxy)\n\n if opts.get('multimaster', False):\n s_opts = copy.deepcopy(opts)\n functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy,\n loaded_base_name=self.loaded_base_name, notify=notify)\n else:\n functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy)\n returners = salt.loader.returners(opts, functions, proxy=proxy)\n errors = {}\n if '_errors' in functions:\n errors = functions['_errors']\n functions.pop('_errors')\n\n # we're done, reset the limits!\n if modules_max_memory is True:\n resource.setrlimit(resource.RLIMIT_AS, 
old_mem_limit)\n\n executors = salt.loader.executors(opts, functions, proxy=proxy)\n\n if opt_in:\n self.opts = opts\n\n return functions, returners, errors, executors\n" ]
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, 
self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' 
mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! 
if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) executors = salt.loader.executors(opts, functions, proxy=proxy) if opt_in: self.opts = opts return functions, returners, errors, executors def _send_req_sync(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.ReqChannel.factory(self.opts) try: return channel.send(load, timeout=timeout) finally: channel.close() @tornado.gen.coroutine def _send_req_async(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.AsyncReqChannel.factory(self.opts) try: ret = yield channel.send(load, timeout=timeout) raise tornado.gen.Return(ret) finally: channel.close() def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. ''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return if sync: try: self._send_req_sync(load, timeout) except salt.exceptions.SaltReqTimeoutError: log.info('fire_master failed: master could not be contacted. 
Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return False except Exception: log.info('fire_master failed: %s', traceback.format_exc()) return False else: if timeout_handler is None: def handle_timeout(*_): log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return True timeout_handler = handle_timeout with tornado.stack_context.ExceptionStackContext(timeout_handler): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # Ensure payload is unicode. Disregard failure to decode binary blobs. if six.PY2: data = salt.utils.data.decode(data, keep=True) if 'user' in data: log.info( 'User %s Executing command %s with jid %s', data['user'], data['fun'], data['jid'] ) else: log.info( 'Executing command %s with jid %s', data['fun'], data['jid'] ) log.debug('Command details %s', data) # Don't duplicate jobs log.trace('Started JIDs: %s', self.jid_queue) if self.jid_queue is not None: if data['jid'] in self.jid_queue: return else: self.jid_queue.append(data['jid']) if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']: self.jid_queue.pop(0) if isinstance(data['fun'], six.string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners process_count_max = self.opts.get('process_count_max') process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs') if process_count_max > 0: process_count = len(salt.utils.minion.running(self.opts)) while 
process_count >= process_count_max: log.warning('Maximum number of processes (%s) reached while ' 'executing jid %s, waiting %s seconds...', process_count_max, data['jid'], process_count_max_sleep_secs) yield tornado.gen.sleep(process_count_max_sleep_secs) process_count = len(salt.utils.minion.running(self.opts)) # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. instance = self multiprocessing_enabled = self.opts.get('multiprocessing', True) if multiprocessing_enabled: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None with default_signals(signal.SIGINT, signal.SIGTERM): process = SignalHandlingMultiprocessingProcess( target=self._target, args=(instance, self.opts, data, self.connected) ) else: process = threading.Thread( target=self._target, args=(instance, self.opts, data, self.connected), name=data['jid'] ) if multiprocessing_enabled: with default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers process.start() else: process.start() # TODO: remove the windows specific check? 
if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading 
target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. 
Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
try: func_result = all(return_data.get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret['retcode'] = retcode ret['success'] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = 'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( 'A command in \'%s\' had a problem: %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( 'Problem executing \'%s\': %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing \'{0}\': {1}'.format( function_name, exc ) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format( function_name, exc, func.__doc__ or '' ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string(function_name) ret['return'] = docs else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = 
function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: \'{0}\''.format( minion_instance.function_errors[mod_name] ) ret['success'] = False ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. Ignoring.') if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) # Add default returners from minion config # Should have been coverted to comma-delimited string already if isinstance(opts.get('return'), six.string_types): if data['ret']: data['ret'] = ','.join((data['ret'], opts['return'])) else: data['ret'] = opts['return'] log.debug('minion return: %s', ret) # TODO: make a list? Seems odd to split it this late :/ if data['ret'] and isinstance(data['ret'], six.string_types): if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: returner_str = '{0}.returner'.format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string(returner_str) log.error( 'Returner %s could not be loaded: %s', returner_str, returner_err ) except Exception as exc: log.exception( 'The return failed for job %s: %s', data['jid'], exc ) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. 
''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID %s', sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) multifunc_ordered = opts.get('multifunc_ordered', False) num_funcs = len(data['fun']) if multifunc_ordered: ret = { 'return': [None] * num_funcs, 'retcode': [None] * num_funcs, 'success': [False] * num_funcs } else: ret = { 'return': {}, 'retcode': {}, 'success': {} } for ind in range(0, num_funcs): if not multifunc_ordered: ret['success'][data['fun'][ind]] = False try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True elif minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. 
Only ' 'saltutil.refresh_pillar allowed in blackout mode.') func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) minion_instance.functions.pack['__context__']['retcode'] = 0 key = ind if multifunc_ordered else data['fun'][ind] ret['return'][key] = func(*args, **kwargs) retcode = minion_instance.functions.pack['__context__'].get( 'retcode', 0 ) if retcode == 0: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. try: func_result = all(ret['return'][key].get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = 1 ret['retcode'][key] = retcode ret['success'][key] = retcode == 0 except Exception as exc: trb = traceback.format_exc() log.warning('The minion function caused an exception: %s', exc) if multifunc_ordered: ret['return'][ind] = trb else: ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job %s: %s', data['jid'], exc ) def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file 
is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled if ret['jid'] == 'req': ret['jid'] = salt.utils.jid.gen_jid(self.opts) salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) if not self.opts['pub_ret']: return '' def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. 
If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' if not isinstance(rets, list): rets = [rets] jids = {} for ret in rets: jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) load = jids.setdefault(jid, {}) if ret_cmd == '_syndic_return': if not load: load.update({'id': self.opts['id'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__'), 'return': {}}) if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load.update({'id': self.opts['id']}) for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. 
This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled salt.utils.minion.cache_jobs(self.opts, load['jid'], ret) load = {'cmd': ret_cmd, 'load': list(six.itervalues(jids))} def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' 
) else: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data) def _refresh_grains_watcher(self, refresh_interval_in_minutes): ''' Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: :return: None ''' if '__update_grains' not in self.opts.get('schedule', {}): if 'schedule' not in self.opts: self.opts['schedule'] = {} self.opts['schedule'].update({ '__update_grains': { 'function': 'event.fire', 'args': [{}, 'grains_refresh'], 'minutes': refresh_interval_in_minutes } }) def _fire_master_minion_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to False in Sodium release. self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'minion_start' ) # send name spaced event self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'minion'), ) def beacons_refresh(self): ''' Refresh the functions and returners. ''' log.debug('Refreshing beacons.') self.beacons = salt.beacons.Beacon(self.opts, self.functions) def matchers_refresh(self): ''' Refresh the matchers ''' log.debug('Refreshing matchers.') self.matchers = salt.loader.matchers(self.opts) # TODO: only allow one future in flight at a time? @tornado.gen.coroutine def pillar_refresh(self, force_refresh=False, notify=False): ''' Refresh the pillar ''' if self.connected: log.debug('Refreshing pillar. 
Notify: %s', notify) async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ) try: self.opts['pillar'] = yield async_pillar.compile_pillar() if notify: evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE) except SaltClientError: # Do not exit if a pillar refresh fails. log.error('Pillar data could not be refreshed. ' 'One or more masters may be down!') finally: async_pillar.destroy() self.module_refresh(force_refresh, notify) self.matchers_refresh() self.beacons_refresh() def manage_schedule(self, tag, data): ''' Refresh the functions and returners. ''' func = data.get('func', None) name = data.get('name', None) schedule = data.get('schedule', None) where = data.get('where', None) persist = data.get('persist', None) funcs = {'delete': ('delete_job', (name, persist)), 'add': ('add_job', (schedule, persist)), 'modify': ('modify_job', (name, schedule, persist)), 'enable': ('enable_schedule', ()), 'disable': ('disable_schedule', ()), 'enable_job': ('enable_job', (name, persist)), 'disable_job': ('disable_job', (name, persist)), 'postpone_job': ('postpone_job', (name, data)), 'skip_job': ('skip_job', (name, data)), 'reload': ('reload', (schedule,)), 'list': ('list', (where,)), 'save_schedule': ('save_schedule', ()), 'get_next_fire_time': ('get_next_fire_time', (name,))} # Call the appropriate schedule function try: alias, params = funcs.get(func) getattr(self.schedule, alias)(*params) except TypeError: log.error('Function "%s" is unavailable in salt.utils.scheduler', func) def manage_beacons(self, tag, data): ''' Manage Beacons ''' func = data.get('func', None) name = data.get('name', None) beacon_data = data.get('beacon_data', None) include_pillar = data.get('include_pillar', None) include_opts = data.get('include_opts', None) funcs = {'add': ('add_beacon', (name, 
beacon_data)), 'modify': ('modify_beacon', (name, beacon_data)), 'delete': ('delete_beacon', (name,)), 'enable': ('enable_beacons', ()), 'disable': ('disable_beacons', ()), 'enable_beacon': ('enable_beacon', (name,)), 'disable_beacon': ('disable_beacon', (name,)), 'list': ('list_beacons', (include_opts, include_pillar)), 'list_available': ('list_available_beacons', ()), 'validate_beacon': ('validate_beacon', (name, beacon_data)), 'reset': ('reset', ())} # Call the appropriate beacon function try: alias, params = funcs.get(func) getattr(self.beacons, alias)(*params) except AttributeError: log.error('Function "%s" is unavailable in salt.beacons', func) except TypeError as exc: log.info( 'Failed to handle %s with data(%s). Error: %s', tag, data, exc, exc_info_on_loglevel=logging.DEBUG ) def environ_setenv(self, tag, data): ''' Set the salt-minion main process environment according to the data contained in the minion event data ''' environ = data.get('environ', None) if environ is None: return False false_unsets = data.get('false_unsets', False) clear_all = data.get('clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all) def _pre_tune(self): ''' Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running ''' if self._running is None: self._running = True elif self._running is False: log.error( 'This %s was scheduled to stop. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return elif self._running is True: log.error( 'This %s is already running. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return try: log.info( '%s is starting as user \'%s\'', self.__class__.__name__, salt.utils.user.get_user() ) except Exception as err: # Only windows is allowed to fail here. See #3189. Log as debug in # that case. Else, error. 
log.log( salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR, 'Failed to get the user who is starting %s', self.__class__.__name__, exc_info=err ) def _mine_send(self, tag, data): ''' Send mine data to the master ''' channel = salt.transport.client.ReqChannel.factory(self.opts) data['tok'] = self.tok try: ret = channel.send(data) return ret except SaltReqTimeoutError: log.warning('Unable to send mine data to master.') return None finally: channel.close() def _handle_tag_module_refresh(self, tag, data): ''' Handle a module_refresh event ''' self.module_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) @tornado.gen.coroutine def _handle_tag_pillar_refresh(self, tag, data): ''' Handle a pillar_refresh event ''' yield self.pillar_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) def _handle_tag_beacons_refresh(self, tag, data): ''' Handle a beacon_refresh event ''' self.beacons_refresh() def _handle_tag_matchers_refresh(self, tag, data): ''' Handle a matchers_refresh event ''' self.matchers_refresh() def _handle_tag_manage_schedule(self, tag, data): ''' Handle a manage_schedule event ''' self.manage_schedule(tag, data) def _handle_tag_manage_beacons(self, tag, data): ''' Handle a manage_beacons event ''' self.manage_beacons(tag, data) def _handle_tag_grains_refresh(self, tag, data): ''' Handle a grains_refresh event ''' if (data.get('force_refresh', False) or self.grains_cache != self.opts['grains']): self.pillar_refresh(force_refresh=True) self.grains_cache = self.opts['grains'] def _handle_tag_environ_setenv(self, tag, data): ''' Handle a environ_setenv event ''' self.environ_setenv(tag, data) def _handle_tag_minion_mine(self, tag, data): ''' Handle a _minion_mine event ''' self._mine_send(tag, data) def _handle_tag_fire_master(self, tag, data): ''' Handle a fire_master event ''' if self.connected: log.debug('Forwarding master event tag=%s', data['tag']) 
self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) def _handle_tag_master_disconnected_failback(self, tag, data): ''' Handle a master_disconnected_failback event ''' # if the master disconnect event is for a different master, raise an exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 
'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = 
True self.io_loop.stop() def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) def _handle_tag_salt_auth_creds(self, tag, data): ''' Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def handle_event(self, package): 
''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. ''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion.beacons_refresh
python
def beacons_refresh(self): ''' Refresh the functions and returners. ''' log.debug('Refreshing beacons.') self.beacons = salt.beacons.Beacon(self.opts, self.functions)
Refresh the functions and returners.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2217-L2222
null
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, 
self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' 
mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! 
        if modules_max_memory is True:
            # restore the RLIMIT_AS saved before module loading
            resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)

        executors = salt.loader.executors(opts, functions, proxy=proxy)

        if opt_in:
            self.opts = opts

        return functions, returners, errors, executors

    def _send_req_sync(self, load, timeout):
        '''
        Send a request to the master over a synchronous ReqChannel, optionally
        signing the payload first. Returns the master's reply.
        '''
        if self.opts['minion_sign_messages']:
            log.trace('Signing event to be published onto the bus.')
            minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
            sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
            load['sig'] = sig

        channel = salt.transport.client.ReqChannel.factory(self.opts)
        try:
            return channel.send(load, timeout=timeout)
        finally:
            # always release the channel, even on timeout/error
            channel.close()

    @tornado.gen.coroutine
    def _send_req_async(self, load, timeout):
        '''
        Coroutine counterpart of _send_req_sync using an AsyncReqChannel.
        '''
        if self.opts['minion_sign_messages']:
            log.trace('Signing event to be published onto the bus.')
            minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
            sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
            load['sig'] = sig

        channel = salt.transport.client.AsyncReqChannel.factory(self.opts)
        try:
            ret = yield channel.send(load, timeout=timeout)
            raise tornado.gen.Return(ret)
        finally:
            channel.close()

    def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None):
        '''
        Fire an event on the master, or drop message if unable to send.
        '''
        load = {'id': self.opts['id'],
                'cmd': '_minion_event',
                'pretag': pretag,
                'tok': self.tok}
        if events:
            load['events'] = events
        elif data and tag:
            load['data'] = data
            load['tag'] = tag
        elif not data and tag:
            load['data'] = {}
            load['tag'] = tag
        else:
            # nothing meaningful to send
            return

        if sync:
            try:
                self._send_req_sync(load, timeout)
            except salt.exceptions.SaltReqTimeoutError:
                log.info('fire_master failed: master could not be contacted. Request timed out.')
                # very likely one of the masters is dead, status.master will flush it
                self.functions['status.master'](self.opts['master'])
                return False
            except Exception:
                log.info('fire_master failed: %s', traceback.format_exc())
                return False
        else:
            if timeout_handler is None:
                def handle_timeout(*_):
                    log.info('fire_master failed: master could not be contacted. Request timed out.')
                    # very likely one of the masters is dead, status.master will flush it
                    self.functions['status.master'](self.opts['master'])
                    return True
                timeout_handler = handle_timeout

            # fire-and-forget; timeout_handler absorbs the timeout exception
            with tornado.stack_context.ExceptionStackContext(timeout_handler):
                self._send_req_async(load, timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg
        return True

    @tornado.gen.coroutine
    def _handle_decoded_payload(self, data):
        '''
        Override this method if you wish to handle the decoded data
        differently.
        '''
        # Ensure payload is unicode. Disregard failure to decode binary blobs.
        if six.PY2:
            data = salt.utils.data.decode(data, keep=True)
        if 'user' in data:
            log.info(
                'User %s Executing command %s with jid %s',
                data['user'], data['fun'], data['jid']
            )
        else:
            log.info(
                'Executing command %s with jid %s',
                data['fun'], data['jid']
            )
        log.debug('Command details %s', data)

        # Don't duplicate jobs
        log.trace('Started JIDs: %s', self.jid_queue)
        if self.jid_queue is not None:
            if data['jid'] in self.jid_queue:
                # already seen this jid; drop the duplicate
                return
            else:
                self.jid_queue.append(data['jid'])
                # bound the queue at the configured high-water mark
                if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']:
                    self.jid_queue.pop(0)

        if isinstance(data['fun'], six.string_types):
            if data['fun'] == 'sys.reload_modules':
                self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
                self.schedule.functions = self.functions
                self.schedule.returners = self.returners

        # throttle: wait until running job count drops below process_count_max
        process_count_max = self.opts.get('process_count_max')
        process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs')
        if process_count_max > 0:
            process_count = len(salt.utils.minion.running(self.opts))
            while process_count >= process_count_max:
                log.warning('Maximum number of processes (%s) reached while '
                            'executing jid %s, waiting %s seconds...',
                            process_count_max, data['jid'], process_count_max_sleep_secs)
                yield tornado.gen.sleep(process_count_max_sleep_secs)
                process_count = len(salt.utils.minion.running(self.opts))

        # We stash an instance references to allow for the socket
        # communication in Windows. You can't pickle functions, and thus
        # python needs to be able to reconstruct the reference on the other
        # side.
        instance = self
        multiprocessing_enabled = self.opts.get('multiprocessing', True)
        if multiprocessing_enabled:
            if sys.platform.startswith('win'):
                # let python reconstruct the minion on the other side if we're
                # running on windows
                instance = None
            with default_signals(signal.SIGINT, signal.SIGTERM):
                process = SignalHandlingMultiprocessingProcess(
                    target=self._target, args=(instance, self.opts, data, self.connected)
                )
        else:
            process = threading.Thread(
                target=self._target,
                args=(instance, self.opts, data, self.connected),
                name=data['jid']
            )

        if multiprocessing_enabled:
            with default_signals(signal.SIGINT, signal.SIGTERM):
                # Reset current signals before starting the process in
                # order not to inherit the current signal handlers
                process.start()
        else:
            process.start()

        # TODO: remove the windows specific check?
if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading 
target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. 
Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
                    try:
                        func_result = all(return_data.get(x, True)
                                          for x in ('result', 'success'))
                    except Exception:
                        # return data is not a dict
                        func_result = True
                    if not func_result:
                        retcode = salt.defaults.exitcodes.EX_GENERIC

                ret['retcode'] = retcode
                ret['success'] = retcode == salt.defaults.exitcodes.EX_OK
            except CommandNotFoundError as exc:
                msg = 'Command required for \'{0}\' not found'.format(
                    function_name
                )
                log.debug(msg, exc_info=True)
                ret['return'] = '{0}: {1}'.format(msg, exc)
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            except CommandExecutionError as exc:
                log.error(
                    'A command in \'%s\' had a problem: %s',
                    function_name, exc,
                    exc_info_on_loglevel=logging.DEBUG
                )
                ret['return'] = 'ERROR: {0}'.format(exc)
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            except SaltInvocationError as exc:
                log.error(
                    'Problem executing \'%s\': %s',
                    function_name, exc,
                    exc_info_on_loglevel=logging.DEBUG
                )
                ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
                    function_name, exc
                )
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            except TypeError as exc:
                # bad args: include the function's docstring to help the caller
                msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(
                    function_name, exc, func.__doc__ or ''
                )
                log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
                ret['return'] = msg
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            except Exception:
                msg = 'The minion function caused an exception'
                log.warning(msg, exc_info_on_loglevel=True)
                salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
                ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        else:
            # function not found and no executor allows missing functions:
            # return docs for near-matches, or a "missing function" message
            docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name))
            if docs:
                docs[function_name] = minion_instance.functions.missing_fun_string(function_name)
                ret['return'] = docs
            else:
                ret['return'] = minion_instance.functions.missing_fun_string(function_name)
                mod_name = function_name.split('.')[0]
                if mod_name in minion_instance.function_errors:
                    ret['return'] += ' Possible reasons: \'{0}\''.format(
                        minion_instance.function_errors[mod_name]
                    )
            ret['success'] = False
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            ret['out'] = 'nested'

        ret['jid'] = data['jid']
        ret['fun'] = data['fun']
        ret['fun_args'] = data['arg']
        if 'master_id' in data:
            ret['master_id'] = data['master_id']
        if 'metadata' in data:
            if isinstance(data['metadata'], dict):
                ret['metadata'] = data['metadata']
            else:
                log.warning('The metadata parameter must be a dictionary. Ignoring.')
        if minion_instance.connected:
            minion_instance._return_pub(
                ret,
                timeout=minion_instance._return_retry_timer()
            )

        # Add default returners from minion config
        # Should have been coverted to comma-delimited string already
        if isinstance(opts.get('return'), six.string_types):
            if data['ret']:
                data['ret'] = ','.join((data['ret'], opts['return']))
            else:
                data['ret'] = opts['return']

        log.debug('minion return: %s', ret)
        # TODO: make a list? Seems odd to split it this late :/
        if data['ret'] and isinstance(data['ret'], six.string_types):
            if 'ret_config' in data:
                ret['ret_config'] = data['ret_config']
            if 'ret_kwargs' in data:
                ret['ret_kwargs'] = data['ret_kwargs']
            ret['id'] = opts['id']
            for returner in set(data['ret'].split(',')):
                try:
                    returner_str = '{0}.returner'.format(returner)
                    if returner_str in minion_instance.returners:
                        minion_instance.returners[returner_str](ret)
                    else:
                        returner_err = minion_instance.returners.missing_fun_string(returner_str)
                        log.error(
                            'Returner %s could not be loaded: %s',
                            returner_str, returner_err
                        )
                except Exception as exc:
                    # one failing returner must not stop the others
                    log.exception(
                        'The return failed for job %s: %s', data['jid'], exc
                    )

    @classmethod
    def _thread_multi_return(cls, minion_instance, opts, data):
        '''
        This method should be used as a threading target, start the actual
        minion side execution.
        '''
        fn_ = os.path.join(minion_instance.proc_dir, data['jid'])

        if opts['multiprocessing'] and not salt.utils.platform.is_windows():
            # Shutdown the multiprocessing before daemonizing
            salt.log.setup.shutdown_multiprocessing_logging()

            salt.utils.process.daemonize_if(opts)

            # Reconfigure multiprocessing logging after daemonizing
            salt.log.setup.setup_multiprocessing_logging()

        salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))

        sdata = {'pid': os.getpid()}
        sdata.update(data)
        log.info('Starting a new job with PID %s', sdata['pid'])
        with salt.utils.files.fopen(fn_, 'w+b') as fp_:
            fp_.write(minion_instance.serial.dumps(sdata))

        # multifunc_ordered keys results by position; otherwise by function name
        multifunc_ordered = opts.get('multifunc_ordered', False)
        num_funcs = len(data['fun'])
        if multifunc_ordered:
            ret = {
                'return': [None] * num_funcs,
                'retcode': [None] * num_funcs,
                'success': [False] * num_funcs
            }
        else:
            ret = {
                'return': {},
                'retcode': {},
                'success': {}
            }

        for ind in range(0, num_funcs):
            if not multifunc_ordered:
                ret['success'][data['fun'][ind]] = False
            try:
                minion_blackout_violation = False
                if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
                    whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
                    # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                    if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
                        minion_blackout_violation = True
                elif minion_instance.opts['grains'].get('minion_blackout', False):
                    whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
                    if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
                        minion_blackout_violation = True
                if minion_blackout_violation:
                    raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                              'to False in pillar or grains to resume operations. Only '
                                              'saltutil.refresh_pillar allowed in blackout mode.')

                func = minion_instance.functions[data['fun'][ind]]

                args, kwargs = load_args_and_kwargs(
                    func,
                    data['arg'][ind],
                    data)
                minion_instance.functions.pack['__context__']['retcode'] = 0
                key = ind if multifunc_ordered else data['fun'][ind]
                ret['return'][key] = func(*args, **kwargs)
                retcode = minion_instance.functions.pack['__context__'].get(
                    'retcode',
                    0
                )
                if retcode == 0:
                    # No nonzero retcode in __context__ dunder. Check if return
                    # is a dictionary with a "result" or "success" key.
                    try:
                        func_result = all(ret['return'][key].get(x, True)
                                          for x in ('result', 'success'))
                    except Exception:
                        # return data is not a dict
                        func_result = True
                    if not func_result:
                        retcode = 1

                ret['retcode'][key] = retcode
                ret['success'][key] = retcode == 0
            except Exception as exc:
                trb = traceback.format_exc()
                log.warning('The minion function caused an exception: %s', exc)
                if multifunc_ordered:
                    ret['return'][ind] = trb
                else:
                    ret['return'][data['fun'][ind]] = trb
        ret['jid'] = data['jid']
        ret['fun'] = data['fun']
        ret['fun_args'] = data['arg']
        if 'metadata' in data:
            ret['metadata'] = data['metadata']
        if minion_instance.connected:
            minion_instance._return_pub(
                ret,
                timeout=minion_instance._return_retry_timer()
            )
        if data['ret']:
            if 'ret_config' in data:
                ret['ret_config'] = data['ret_config']
            if 'ret_kwargs' in data:
                ret['ret_kwargs'] = data['ret_kwargs']
            for returner in set(data['ret'].split(',')):
                ret['id'] = opts['id']
                try:
                    minion_instance.returners['{0}.returner'.format(
                        returner
                    )](ret)
                except Exception as exc:
                    log.error(
                        'The return failed for job %s: %s',
                        data['jid'],
                        exc
                    )

    def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True):
        '''
        Return the data from the executed command to the master server
        '''
        jid = ret.get('jid', ret.get('__jid__'))
        fun = ret.get('fun', ret.get('__fun__'))
        if self.opts['multiprocessing']:
            # remove the proc_dir marker for this job; it is finished
            fn_ = os.path.join(self.proc_dir, jid)
            if os.path.isfile(fn_):
                try:
                    os.remove(fn_)
                except (OSError, IOError):
                    # The file is gone already
                    pass
        log.info('Returning information for job: %s', jid)
        log.trace('Return data: %s', ret)
        if ret_cmd == '_syndic_return':
            # NOTE(review): this branch reads self.opts['uid'] for 'id' while
            # _return_pub_multi uses self.opts['id'] — looks inconsistent; verify
            load = {'cmd': ret_cmd,
                    'id': self.opts['uid'],
                    'jid': jid,
                    'fun': fun,
                    'arg': ret.get('arg'),
                    'tgt': ret.get('tgt'),
                    'tgt_type': ret.get('tgt_type'),
                    'load': ret.get('__load__')}
            if '__master_id__' in ret:
                load['master_id'] = ret['__master_id__']
            load['return'] = {}
            for key, value in six.iteritems(ret):
                if key.startswith('__'):
                    continue
                load['return'][key] = value
        else:
            load = {'cmd': ret_cmd,
                    'id': self.opts['id']}
            for key, value in six.iteritems(ret):
                load[key] = value

        if 'out' in ret:
            if isinstance(ret['out'], six.string_types):
                load['out'] = ret['out']
            else:
                log.error(
                    'Invalid outputter %s. This is likely a bug.',
                    ret['out']
                )
        else:
            # fall back to the executed function's declared outputter
            try:
                oput = self.functions[fun].__outputter__
            except (KeyError, AttributeError, TypeError):
                pass
            else:
                if isinstance(oput, six.string_types):
                    load['out'] = oput
        if self.opts['cache_jobs']:
            # Local job cache has been enabled
            if ret['jid'] == 'req':
                ret['jid'] = salt.utils.jid.gen_jid(self.opts)
            salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret)

        if not self.opts['pub_ret']:
            return ''

        def timeout_handler(*_):
            log.warning(
                'The minion failed to return the job information for job %s. '
                'This is often due to the master being shut down or '
                'overloaded. If the master is running, consider increasing '
                'the worker_threads value.', jid
            )
            return True

        if sync:
            try:
                ret_val = self._send_req_sync(load, timeout=timeout)
            except SaltReqTimeoutError:
                timeout_handler()
                return ''
        else:
            with tornado.stack_context.ExceptionStackContext(timeout_handler):
                ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg

        log.trace('ret_val = %s', ret_val)  # pylint: disable=no-member
        return ret_val

    def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True):
        '''
        Return the data from the executed command to the master server
        '''
        if not isinstance(rets, list):
            rets = [rets]
        # aggregate per-jid loads so one request carries all returns
        jids = {}
        for ret in rets:
            jid = ret.get('jid', ret.get('__jid__'))
            fun = ret.get('fun', ret.get('__fun__'))
            if self.opts['multiprocessing']:
                fn_ = os.path.join(self.proc_dir, jid)
                if os.path.isfile(fn_):
                    try:
                        os.remove(fn_)
                    except (OSError, IOError):
                        # The file is gone already
                        pass
            log.info('Returning information for job: %s', jid)
            load = jids.setdefault(jid, {})
            if ret_cmd == '_syndic_return':
                if not load:
                    load.update({'id': self.opts['id'],
                                 'jid': jid,
                                 'fun': fun,
                                 'arg': ret.get('arg'),
                                 'tgt': ret.get('tgt'),
                                 'tgt_type': ret.get('tgt_type'),
                                 'load': ret.get('__load__'),
                                 'return': {}})
                if '__master_id__' in ret:
                    load['master_id'] = ret['__master_id__']
                for key, value in six.iteritems(ret):
                    if key.startswith('__'):
                        continue
                    load['return'][key] = value
            else:
                load.update({'id': self.opts['id']})
                for key, value in six.iteritems(ret):
                    load[key] = value

            if 'out' in ret:
                if isinstance(ret['out'], six.string_types):
                    load['out'] = ret['out']
                else:
                    log.error(
                        'Invalid outputter %s. This is likely a bug.',
                        ret['out']
                    )
            else:
                try:
                    oput = self.functions[fun].__outputter__
                except (KeyError, AttributeError, TypeError):
                    pass
                else:
                    if isinstance(oput, six.string_types):
                        load['out'] = oput
            if self.opts['cache_jobs']:
                # Local job cache has been enabled
                salt.utils.minion.cache_jobs(self.opts, load['jid'], ret)

        load = {'cmd': ret_cmd,
                'load': list(six.itervalues(jids))}

        def timeout_handler(*_):
            log.warning(
                'The minion failed to return the job information for job %s. '
                'This is often due to the master being shut down or '
                'overloaded. If the master is running, consider increasing '
                'the worker_threads value.', jid
            )
            return True

        if sync:
            try:
                ret_val = self._send_req_sync(load, timeout=timeout)
            except SaltReqTimeoutError:
                timeout_handler()
                return ''
        else:
            with tornado.stack_context.ExceptionStackContext(timeout_handler):
                ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg

        log.trace('ret_val = %s', ret_val)  # pylint: disable=no-member
        return ret_val

    def _state_run(self):
        '''
        Execute a state run based on information set in the minion config file
        '''
        if self.opts['startup_states']:
            if self.opts.get('master_type', 'str') == 'disable' and \
                    self.opts.get('file_client', 'remote') == 'remote':
                log.warning(
                    'Cannot run startup_states when \'master_type\' is set '
                    'to \'disable\' and \'file_client\' is set to '
                    '\'remote\'. Skipping.'
                )
            else:
                data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
                if self.opts['startup_states'] == 'sls':
                    data['fun'] = 'state.sls'
                    data['arg'] = [self.opts['sls_list']]
                elif self.opts['startup_states'] == 'top':
                    data['fun'] = 'state.top'
                    data['arg'] = [self.opts['top_file']]
                else:
                    data['fun'] = 'state.highstate'
                    data['arg'] = []
                self._handle_decoded_payload(data)

    def _refresh_grains_watcher(self, refresh_interval_in_minutes):
        '''
        Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion

        :param refresh_interval_in_minutes: interval for the scheduled
            'grains_refresh' event fire
        :return: None
        '''
        if '__update_grains' not in self.opts.get('schedule', {}):
            if 'schedule' not in self.opts:
                self.opts['schedule'] = {}
            self.opts['schedule'].update({
                '__update_grains':
                    {
                        'function': 'event.fire',
                        'args': [{}, 'grains_refresh'],
                        'minutes': refresh_interval_in_minutes
                    }
            })

    def _fire_master_minion_start(self):
        # Send an event to the master that the minion is live
        if self.opts['enable_legacy_startup_events']:
            # Old style event. Defaults to False in Sodium release.
            self._fire_master(
                'Minion {0} started at {1}'.format(
                    self.opts['id'],
                    time.asctime()
                ),
                'minion_start'
            )
        # send name spaced event
        self._fire_master(
            'Minion {0} started at {1}'.format(
                self.opts['id'],
                time.asctime()
            ),
            tagify([self.opts['id'], 'start'], 'minion'),
        )

    def module_refresh(self, force_refresh=False, notify=False):
        '''
        Refresh the functions and returners.
        '''
        log.debug('Refreshing modules. Notify=%s', notify)
        self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify)
        # keep the scheduler pointing at the freshly loaded modules
        self.schedule.functions = self.functions
        self.schedule.returners = self.returners

    def matchers_refresh(self):
        '''
        Refresh the matchers
        '''
        log.debug('Refreshing matchers.')
        self.matchers = salt.loader.matchers(self.opts)

    # TODO: only allow one future in flight at a time?
@tornado.gen.coroutine def pillar_refresh(self, force_refresh=False, notify=False): ''' Refresh the pillar ''' if self.connected: log.debug('Refreshing pillar. Notify: %s', notify) async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ) try: self.opts['pillar'] = yield async_pillar.compile_pillar() if notify: evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE) except SaltClientError: # Do not exit if a pillar refresh fails. log.error('Pillar data could not be refreshed. ' 'One or more masters may be down!') finally: async_pillar.destroy() self.module_refresh(force_refresh, notify) self.matchers_refresh() self.beacons_refresh() def manage_schedule(self, tag, data): ''' Refresh the functions and returners. ''' func = data.get('func', None) name = data.get('name', None) schedule = data.get('schedule', None) where = data.get('where', None) persist = data.get('persist', None) funcs = {'delete': ('delete_job', (name, persist)), 'add': ('add_job', (schedule, persist)), 'modify': ('modify_job', (name, schedule, persist)), 'enable': ('enable_schedule', ()), 'disable': ('disable_schedule', ()), 'enable_job': ('enable_job', (name, persist)), 'disable_job': ('disable_job', (name, persist)), 'postpone_job': ('postpone_job', (name, data)), 'skip_job': ('skip_job', (name, data)), 'reload': ('reload', (schedule,)), 'list': ('list', (where,)), 'save_schedule': ('save_schedule', ()), 'get_next_fire_time': ('get_next_fire_time', (name,))} # Call the appropriate schedule function try: alias, params = funcs.get(func) getattr(self.schedule, alias)(*params) except TypeError: log.error('Function "%s" is unavailable in salt.utils.scheduler', func) def manage_beacons(self, tag, data): ''' Manage Beacons ''' func = data.get('func', None) name = data.get('name', None) beacon_data = 
data.get('beacon_data', None) include_pillar = data.get('include_pillar', None) include_opts = data.get('include_opts', None) funcs = {'add': ('add_beacon', (name, beacon_data)), 'modify': ('modify_beacon', (name, beacon_data)), 'delete': ('delete_beacon', (name,)), 'enable': ('enable_beacons', ()), 'disable': ('disable_beacons', ()), 'enable_beacon': ('enable_beacon', (name,)), 'disable_beacon': ('disable_beacon', (name,)), 'list': ('list_beacons', (include_opts, include_pillar)), 'list_available': ('list_available_beacons', ()), 'validate_beacon': ('validate_beacon', (name, beacon_data)), 'reset': ('reset', ())} # Call the appropriate beacon function try: alias, params = funcs.get(func) getattr(self.beacons, alias)(*params) except AttributeError: log.error('Function "%s" is unavailable in salt.beacons', func) except TypeError as exc: log.info( 'Failed to handle %s with data(%s). Error: %s', tag, data, exc, exc_info_on_loglevel=logging.DEBUG ) def environ_setenv(self, tag, data): ''' Set the salt-minion main process environment according to the data contained in the minion event data ''' environ = data.get('environ', None) if environ is None: return False false_unsets = data.get('false_unsets', False) clear_all = data.get('clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all) def _pre_tune(self): ''' Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running ''' if self._running is None: self._running = True elif self._running is False: log.error( 'This %s was scheduled to stop. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return elif self._running is True: log.error( 'This %s is already running. 
Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return try: log.info( '%s is starting as user \'%s\'', self.__class__.__name__, salt.utils.user.get_user() ) except Exception as err: # Only windows is allowed to fail here. See #3189. Log as debug in # that case. Else, error. log.log( salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR, 'Failed to get the user who is starting %s', self.__class__.__name__, exc_info=err ) def _mine_send(self, tag, data): ''' Send mine data to the master ''' channel = salt.transport.client.ReqChannel.factory(self.opts) data['tok'] = self.tok try: ret = channel.send(data) return ret except SaltReqTimeoutError: log.warning('Unable to send mine data to master.') return None finally: channel.close() def _handle_tag_module_refresh(self, tag, data): ''' Handle a module_refresh event ''' self.module_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) @tornado.gen.coroutine def _handle_tag_pillar_refresh(self, tag, data): ''' Handle a pillar_refresh event ''' yield self.pillar_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) def _handle_tag_beacons_refresh(self, tag, data): ''' Handle a beacon_refresh event ''' self.beacons_refresh() def _handle_tag_matchers_refresh(self, tag, data): ''' Handle a matchers_refresh event ''' self.matchers_refresh() def _handle_tag_manage_schedule(self, tag, data): ''' Handle a manage_schedule event ''' self.manage_schedule(tag, data) def _handle_tag_manage_beacons(self, tag, data): ''' Handle a manage_beacons event ''' self.manage_beacons(tag, data) def _handle_tag_grains_refresh(self, tag, data): ''' Handle a grains_refresh event ''' if (data.get('force_refresh', False) or self.grains_cache != self.opts['grains']): self.pillar_refresh(force_refresh=True) self.grains_cache = self.opts['grains'] def _handle_tag_environ_setenv(self, tag, data): ''' Handle a environ_setenv event ''' 
self.environ_setenv(tag, data) def _handle_tag_minion_mine(self, tag, data): ''' Handle a _minion_mine event ''' self._mine_send(tag, data) def _handle_tag_fire_master(self, tag, data): ''' Handle a fire_master event ''' if self.connected: log.debug('Forwarding master event tag=%s', data['tag']) self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) def _handle_tag_master_disconnected_failback(self, tag, data): ''' Handle a master_disconnected_failback event ''' # if the master disconnect event is for a different master, raise an exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) 
else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 
'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = True self.io_loop.stop() def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) def _handle_tag_salt_auth_creds(self, tag, data): ''' 
Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def handle_event(self, package): ''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. 
''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. 
Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion.matchers_refresh
python
def matchers_refresh(self): ''' Refresh the matchers ''' log.debug('Refreshing matchers.') self.matchers = salt.loader.matchers(self.opts)
Refresh the matchers
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2224-L2229
null
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, 
self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' 
mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! 
        if modules_max_memory is True:
            # Imports are done; restore the original address-space limit
            resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)

        executors = salt.loader.executors(opts, functions, proxy=proxy)

        if opt_in:
            self.opts = opts

        return functions, returners, errors, executors

    def _send_req_sync(self, load, timeout):
        '''
        Send ``load`` to the master over the request channel and block until
        the reply arrives (or ``timeout`` expires). The channel is always
        closed, even on error.
        '''
        if self.opts['minion_sign_messages']:
            log.trace('Signing event to be published onto the bus.')
            minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
            sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
            load['sig'] = sig
        channel = salt.transport.client.ReqChannel.factory(self.opts)
        try:
            return channel.send(load, timeout=timeout)
        finally:
            channel.close()

    @tornado.gen.coroutine
    def _send_req_async(self, load, timeout):
        '''
        Asynchronous counterpart of :meth:`_send_req_sync`; yields the
        master's reply via the coroutine's return value.
        '''
        if self.opts['minion_sign_messages']:
            log.trace('Signing event to be published onto the bus.')
            minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
            sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
            load['sig'] = sig
        channel = salt.transport.client.AsyncReqChannel.factory(self.opts)
        try:
            ret = yield channel.send(load, timeout=timeout)
            raise tornado.gen.Return(ret)
        finally:
            channel.close()

    def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60,
                     sync=True, timeout_handler=None):
        '''
        Fire an event on the master, or drop message if unable to send.
        '''
        load = {'id': self.opts['id'],
                'cmd': '_minion_event',
                'pretag': pretag,
                'tok': self.tok}
        if events:
            load['events'] = events
        elif data and tag:
            load['data'] = data
            load['tag'] = tag
        elif not data and tag:
            load['data'] = {}
            load['tag'] = tag
        else:
            # Neither events nor a tag were supplied: nothing to send
            return
        if sync:
            try:
                self._send_req_sync(load, timeout)
            except salt.exceptions.SaltReqTimeoutError:
                log.info('fire_master failed: master could not be contacted. Request timed out.')
                # very likely one of the masters is dead, status.master will flush it
                self.functions['status.master'](self.opts['master'])
                return False
            except Exception:
                log.info('fire_master failed: %s', traceback.format_exc())
                return False
        else:
            if timeout_handler is None:
                def handle_timeout(*_):
                    log.info('fire_master failed: master could not be contacted. Request timed out.')
                    # very likely one of the masters is dead, status.master will flush it
                    self.functions['status.master'](self.opts['master'])
                    return True
                timeout_handler = handle_timeout

            with tornado.stack_context.ExceptionStackContext(timeout_handler):
                self._send_req_async(load, timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg
        return True

    @tornado.gen.coroutine
    def _handle_decoded_payload(self, data):
        '''
        Override this method if you wish to handle the decoded data
        differently.
        '''
        # Ensure payload is unicode. Disregard failure to decode binary blobs.
        if six.PY2:
            data = salt.utils.data.decode(data, keep=True)
        if 'user' in data:
            log.info(
                'User %s Executing command %s with jid %s',
                data['user'], data['fun'], data['jid']
            )
        else:
            log.info(
                'Executing command %s with jid %s',
                data['fun'], data['jid']
            )
        log.debug('Command details %s', data)

        # Don't duplicate jobs
        log.trace('Started JIDs: %s', self.jid_queue)
        if self.jid_queue is not None:
            if data['jid'] in self.jid_queue:
                return
            else:
                self.jid_queue.append(data['jid'])
                if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']:
                    self.jid_queue.pop(0)

        if isinstance(data['fun'], six.string_types):
            if data['fun'] == 'sys.reload_modules':
                self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
                self.schedule.functions = self.functions
                self.schedule.returners = self.returners

        process_count_max = self.opts.get('process_count_max')
        process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs')
        if process_count_max > 0:
            process_count = len(salt.utils.minion.running(self.opts))
            while process_count >= process_count_max:
                log.warning('Maximum number of processes (%s) reached while '
                            'executing jid %s, waiting %s seconds...',
                            process_count_max, data['jid'], process_count_max_sleep_secs)
                yield tornado.gen.sleep(process_count_max_sleep_secs)
                process_count = len(salt.utils.minion.running(self.opts))

        # We stash an instance references to allow for the socket
        # communication in Windows. You can't pickle functions, and thus
        # python needs to be able to reconstruct the reference on the other
        # side.
        instance = self
        multiprocessing_enabled = self.opts.get('multiprocessing', True)
        if multiprocessing_enabled:
            if sys.platform.startswith('win'):
                # let python reconstruct the minion on the other side if we're
                # running on windows
                instance = None
            with default_signals(signal.SIGINT, signal.SIGTERM):
                process = SignalHandlingMultiprocessingProcess(
                    target=self._target, args=(instance, self.opts, data, self.connected)
                )
        else:
            process = threading.Thread(
                target=self._target,
                args=(instance, self.opts, data, self.connected),
                name=data['jid']
            )

        if multiprocessing_enabled:
            with default_signals(signal.SIGINT, signal.SIGTERM):
                # Reset current signals before starting the process in
                # order not to inherit the current signal handlers
                process.start()
        else:
            process.start()
        # TODO: remove the windows specific check?
if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading 
target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. 
Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
                    try:
                        func_result = all(return_data.get(x, True)
                                          for x in ('result', 'success'))
                    except Exception:
                        # return data is not a dict
                        func_result = True
                    if not func_result:
                        retcode = salt.defaults.exitcodes.EX_GENERIC

                ret['retcode'] = retcode
                ret['success'] = retcode == salt.defaults.exitcodes.EX_OK
            except CommandNotFoundError as exc:
                msg = 'Command required for \'{0}\' not found'.format(
                    function_name
                )
                log.debug(msg, exc_info=True)
                ret['return'] = '{0}: {1}'.format(msg, exc)
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            except CommandExecutionError as exc:
                log.error(
                    'A command in \'%s\' had a problem: %s',
                    function_name, exc,
                    exc_info_on_loglevel=logging.DEBUG
                )
                ret['return'] = 'ERROR: {0}'.format(exc)
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            except SaltInvocationError as exc:
                log.error(
                    'Problem executing \'%s\': %s',
                    function_name, exc,
                    exc_info_on_loglevel=logging.DEBUG
                )
                ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
                    function_name, exc
                )
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            except TypeError as exc:
                msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(
                    function_name, exc, func.__doc__ or ''
                )
                log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
                ret['return'] = msg
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            except Exception:
                msg = 'The minion function caused an exception'
                log.warning(msg, exc_info_on_loglevel=True)
                salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
                ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        else:
            # Function is not loaded: return the best available diagnostics
            docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name))
            if docs:
                docs[function_name] = minion_instance.functions.missing_fun_string(function_name)
                ret['return'] = docs
            else:
                ret['return'] = minion_instance.functions.missing_fun_string(function_name)
                mod_name = function_name.split('.')[0]
                if mod_name in minion_instance.function_errors:
                    ret['return'] += ' Possible reasons: \'{0}\''.format(
                        minion_instance.function_errors[mod_name]
                    )
            ret['success'] = False
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            ret['out'] = 'nested'

        ret['jid'] = data['jid']
        ret['fun'] = data['fun']
        ret['fun_args'] = data['arg']
        if 'master_id' in data:
            ret['master_id'] = data['master_id']
        if 'metadata' in data:
            if isinstance(data['metadata'], dict):
                ret['metadata'] = data['metadata']
            else:
                log.warning('The metadata parameter must be a dictionary. Ignoring.')
        if minion_instance.connected:
            minion_instance._return_pub(
                ret,
                timeout=minion_instance._return_retry_timer()
            )

        # Add default returners from minion config
        # Should have been converted to comma-delimited string already
        if isinstance(opts.get('return'), six.string_types):
            if data['ret']:
                data['ret'] = ','.join((data['ret'], opts['return']))
            else:
                data['ret'] = opts['return']

        log.debug('minion return: %s', ret)
        # TODO: make a list? Seems odd to split it this late :/
        if data['ret'] and isinstance(data['ret'], six.string_types):
            if 'ret_config' in data:
                ret['ret_config'] = data['ret_config']
            if 'ret_kwargs' in data:
                ret['ret_kwargs'] = data['ret_kwargs']
            ret['id'] = opts['id']
            for returner in set(data['ret'].split(',')):
                try:
                    returner_str = '{0}.returner'.format(returner)
                    if returner_str in minion_instance.returners:
                        minion_instance.returners[returner_str](ret)
                    else:
                        returner_err = minion_instance.returners.missing_fun_string(returner_str)
                        log.error(
                            'Returner %s could not be loaded: %s',
                            returner_str, returner_err
                        )
                except Exception as exc:
                    log.exception(
                        'The return failed for job %s: %s', data['jid'], exc
                    )

    @classmethod
    def _thread_multi_return(cls, minion_instance, opts, data):
        '''
        This method should be used as a threading target, start the actual
        minion side execution.
        '''
        fn_ = os.path.join(minion_instance.proc_dir, data['jid'])

        if opts['multiprocessing'] and not salt.utils.platform.is_windows():
            # Shutdown the multiprocessing before daemonizing
            salt.log.setup.shutdown_multiprocessing_logging()

            salt.utils.process.daemonize_if(opts)

            # Reconfigure multiprocessing logging after daemonizing
            salt.log.setup.setup_multiprocessing_logging()

        salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))

        sdata = {'pid': os.getpid()}
        sdata.update(data)
        log.info('Starting a new job with PID %s', sdata['pid'])
        with salt.utils.files.fopen(fn_, 'w+b') as fp_:
            fp_.write(minion_instance.serial.dumps(sdata))

        multifunc_ordered = opts.get('multifunc_ordered', False)
        num_funcs = len(data['fun'])
        if multifunc_ordered:
            ret = {
                'return': [None] * num_funcs,
                'retcode': [None] * num_funcs,
                'success': [False] * num_funcs
            }
        else:
            ret = {
                'return': {},
                'retcode': {},
                'success': {}
            }

        for ind in range(0, num_funcs):
            if not multifunc_ordered:
                ret['success'][data['fun'][ind]] = False
            try:
                minion_blackout_violation = False
                if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
                    whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
                    # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                    if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
                        minion_blackout_violation = True
                elif minion_instance.opts['grains'].get('minion_blackout', False):
                    whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
                    if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
                        minion_blackout_violation = True
                if minion_blackout_violation:
                    raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                              'to False in pillar or grains to resume operations. Only '
                                              'saltutil.refresh_pillar allowed in blackout mode.')

                func = minion_instance.functions[data['fun'][ind]]

                args, kwargs = load_args_and_kwargs(
                    func,
                    data['arg'][ind],
                    data)
                minion_instance.functions.pack['__context__']['retcode'] = 0
                key = ind if multifunc_ordered else data['fun'][ind]
                ret['return'][key] = func(*args, **kwargs)
                retcode = minion_instance.functions.pack['__context__'].get(
                    'retcode',
                    0
                )
                if retcode == 0:
                    # No nonzero retcode in __context__ dunder. Check if return
                    # is a dictionary with a "result" or "success" key.
                    try:
                        func_result = all(ret['return'][key].get(x, True)
                                          for x in ('result', 'success'))
                    except Exception:
                        # return data is not a dict
                        func_result = True
                    if not func_result:
                        retcode = 1

                ret['retcode'][key] = retcode
                ret['success'][key] = retcode == 0
            except Exception as exc:
                trb = traceback.format_exc()
                log.warning('The minion function caused an exception: %s', exc)
                if multifunc_ordered:
                    ret['return'][ind] = trb
                else:
                    ret['return'][data['fun'][ind]] = trb
        ret['jid'] = data['jid']
        ret['fun'] = data['fun']
        ret['fun_args'] = data['arg']
        if 'metadata' in data:
            ret['metadata'] = data['metadata']
        if minion_instance.connected:
            minion_instance._return_pub(
                ret,
                timeout=minion_instance._return_retry_timer()
            )
        if data['ret']:
            if 'ret_config' in data:
                ret['ret_config'] = data['ret_config']
            if 'ret_kwargs' in data:
                ret['ret_kwargs'] = data['ret_kwargs']
            for returner in set(data['ret'].split(',')):
                ret['id'] = opts['id']
                try:
                    minion_instance.returners['{0}.returner'.format(
                        returner
                    )](ret)
                except Exception as exc:
                    log.error(
                        'The return failed for job %s: %s',
                        data['jid'],
                        exc
                    )

    def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True):
        '''
        Return the data from the executed command to the master server
        '''
        jid = ret.get('jid', ret.get('__jid__'))
        fun = ret.get('fun', ret.get('__fun__'))
        if self.opts['multiprocessing']:
            fn_ = os.path.join(self.proc_dir, jid)
            if os.path.isfile(fn_):
                try:
                    os.remove(fn_)
                except (OSError, IOError):
                    # The file is gone already
                    pass
        log.info('Returning information for job: %s', jid)
        log.trace('Return data: %s', ret)
        if ret_cmd == '_syndic_return':
            # NOTE(review): 'uid' (not 'id') is read from opts here for the
            # syndic return path — confirm this is intentional
            load = {'cmd': ret_cmd,
                    'id': self.opts['uid'],
                    'jid': jid,
                    'fun': fun,
                    'arg': ret.get('arg'),
                    'tgt': ret.get('tgt'),
                    'tgt_type': ret.get('tgt_type'),
                    'load': ret.get('__load__')}
            if '__master_id__' in ret:
                load['master_id'] = ret['__master_id__']
            load['return'] = {}
            for key, value in six.iteritems(ret):
                if key.startswith('__'):
                    continue
                load['return'][key] = value
        else:
            load = {'cmd': ret_cmd,
                    'id': self.opts['id']}
            for key, value in six.iteritems(ret):
                load[key] = value

        if 'out' in ret:
            if isinstance(ret['out'], six.string_types):
                load['out'] = ret['out']
            else:
                log.error(
                    'Invalid outputter %s. This is likely a bug.',
                    ret['out']
                )
        else:
            try:
                oput = self.functions[fun].__outputter__
            except (KeyError, AttributeError, TypeError):
                pass
            else:
                if isinstance(oput, six.string_types):
                    load['out'] = oput
        if self.opts['cache_jobs']:
            # Local job cache has been enabled
            if ret['jid'] == 'req':
                ret['jid'] = salt.utils.jid.gen_jid(self.opts)
            salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret)

        if not self.opts['pub_ret']:
            return ''

        def timeout_handler(*_):
            log.warning(
                'The minion failed to return the job information for job %s. '
                'This is often due to the master being shut down or '
                'overloaded. If the master is running, consider increasing '
                'the worker_threads value.', jid
            )
            return True

        if sync:
            try:
                ret_val = self._send_req_sync(load, timeout=timeout)
            except SaltReqTimeoutError:
                timeout_handler()
                return ''
        else:
            with tornado.stack_context.ExceptionStackContext(timeout_handler):
                ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg

        log.trace('ret_val = %s', ret_val)  # pylint: disable=no-member
        return ret_val

    def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True):
        '''
        Return the data from the executed command to the master server
        '''
        if not isinstance(rets, list):
            rets = [rets]
        jids = {}
        for ret in rets:
            jid = ret.get('jid', ret.get('__jid__'))
            fun = ret.get('fun', ret.get('__fun__'))
            if self.opts['multiprocessing']:
                fn_ = os.path.join(self.proc_dir, jid)
                if os.path.isfile(fn_):
                    try:
                        os.remove(fn_)
                    except (OSError, IOError):
                        # The file is gone already
                        pass
            log.info('Returning information for job: %s', jid)
            # Aggregate returns per jid so each jid is sent once
            load = jids.setdefault(jid, {})
            if ret_cmd == '_syndic_return':
                if not load:
                    load.update({'id': self.opts['id'],
                                 'jid': jid,
                                 'fun': fun,
                                 'arg': ret.get('arg'),
                                 'tgt': ret.get('tgt'),
                                 'tgt_type': ret.get('tgt_type'),
                                 'load': ret.get('__load__'),
                                 'return': {}})
                if '__master_id__' in ret:
                    load['master_id'] = ret['__master_id__']
                for key, value in six.iteritems(ret):
                    if key.startswith('__'):
                        continue
                    load['return'][key] = value
            else:
                load.update({'id': self.opts['id']})
                for key, value in six.iteritems(ret):
                    load[key] = value
            if 'out' in ret:
                if isinstance(ret['out'], six.string_types):
                    load['out'] = ret['out']
                else:
                    log.error(
                        'Invalid outputter %s. This is likely a bug.',
                        ret['out']
                    )
            else:
                try:
                    oput = self.functions[fun].__outputter__
                except (KeyError, AttributeError, TypeError):
                    pass
                else:
                    if isinstance(oput, six.string_types):
                        load['out'] = oput
            if self.opts['cache_jobs']:
                # Local job cache has been enabled
                salt.utils.minion.cache_jobs(self.opts, load['jid'], ret)

        load = {'cmd': ret_cmd,
                'load': list(six.itervalues(jids))}

        def timeout_handler(*_):
            log.warning(
                'The minion failed to return the job information for job %s. '
                'This is often due to the master being shut down or '
                'overloaded. If the master is running, consider increasing '
                'the worker_threads value.', jid
            )
            return True

        if sync:
            try:
                ret_val = self._send_req_sync(load, timeout=timeout)
            except SaltReqTimeoutError:
                timeout_handler()
                return ''
        else:
            with tornado.stack_context.ExceptionStackContext(timeout_handler):
                ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg

        log.trace('ret_val = %s', ret_val)  # pylint: disable=no-member
        return ret_val

    def _state_run(self):
        '''
        Execute a state run based on information set in the minion config file
        '''
        if self.opts['startup_states']:
            if self.opts.get('master_type', 'str') == 'disable' and \
                    self.opts.get('file_client', 'remote') == 'remote':
                log.warning(
                    'Cannot run startup_states when \'master_type\' is set '
                    'to \'disable\' and \'file_client\' is set to '
                    '\'remote\'. Skipping.'
                )
            else:
                data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
                if self.opts['startup_states'] == 'sls':
                    data['fun'] = 'state.sls'
                    data['arg'] = [self.opts['sls_list']]
                elif self.opts['startup_states'] == 'top':
                    data['fun'] = 'state.top'
                    data['arg'] = [self.opts['top_file']]
                else:
                    data['fun'] = 'state.highstate'
                    data['arg'] = []
                self._handle_decoded_payload(data)

    def _refresh_grains_watcher(self, refresh_interval_in_minutes):
        '''
        Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion
        :param refresh_interval_in_minutes:
        :return: None
        '''
        if '__update_grains' not in self.opts.get('schedule', {}):
            if 'schedule' not in self.opts:
                self.opts['schedule'] = {}
            self.opts['schedule'].update({
                '__update_grains': {
                    'function': 'event.fire',
                    'args': [{}, 'grains_refresh'],
                    'minutes': refresh_interval_in_minutes
                }
            })

    def _fire_master_minion_start(self):
        '''
        Fire the minion start event(s) on the master.
        '''
        # Send an event to the master that the minion is live
        if self.opts['enable_legacy_startup_events']:
            # Old style event. Defaults to False in Sodium release.
            self._fire_master(
                'Minion {0} started at {1}'.format(
                    self.opts['id'],
                    time.asctime()
                ),
                'minion_start'
            )
        # send name spaced event
        self._fire_master(
            'Minion {0} started at {1}'.format(
                self.opts['id'],
                time.asctime()
            ),
            tagify([self.opts['id'], 'start'], 'minion'),
        )

    def module_refresh(self, force_refresh=False, notify=False):
        '''
        Refresh the functions and returners.
        '''
        log.debug('Refreshing modules. Notify=%s', notify)
        self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify)
        self.schedule.functions = self.functions
        self.schedule.returners = self.returners

    def beacons_refresh(self):
        '''
        Re-instantiate the beacons with the current opts and functions.
        '''
        log.debug('Refreshing beacons.')
        self.beacons = salt.beacons.Beacon(self.opts, self.functions)

    # TODO: only allow one future in flight at a time?
    @tornado.gen.coroutine
    def pillar_refresh(self, force_refresh=False, notify=False):
        '''
        Refresh the pillar
        '''
        if self.connected:
            log.debug('Refreshing pillar. Notify: %s', notify)
            async_pillar = salt.pillar.get_async_pillar(
                self.opts,
                self.opts['grains'],
                self.opts['id'],
                self.opts['saltenv'],
                pillarenv=self.opts.get('pillarenv'),
            )
            try:
                self.opts['pillar'] = yield async_pillar.compile_pillar()
                if notify:
                    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
                    evt.fire_event({'complete': True},
                                   tag=salt.defaults.events.MINION_PILLAR_COMPLETE)
            except SaltClientError:
                # Do not exit if a pillar refresh fails.
                log.error('Pillar data could not be refreshed. '
                          'One or more masters may be down!')
            finally:
                async_pillar.destroy()
        self.module_refresh(force_refresh, notify)
        self.matchers_refresh()
        self.beacons_refresh()

    def manage_schedule(self, tag, data):
        '''
        Dispatch a schedule-management request to the named schedule method.
        '''
        func = data.get('func', None)
        name = data.get('name', None)
        schedule = data.get('schedule', None)
        where = data.get('where', None)
        persist = data.get('persist', None)

        # Map request names to (schedule method, argument tuple)
        funcs = {'delete': ('delete_job', (name, persist)),
                 'add': ('add_job', (schedule, persist)),
                 'modify': ('modify_job', (name, schedule, persist)),
                 'enable': ('enable_schedule', ()),
                 'disable': ('disable_schedule', ()),
                 'enable_job': ('enable_job', (name, persist)),
                 'disable_job': ('disable_job', (name, persist)),
                 'postpone_job': ('postpone_job', (name, data)),
                 'skip_job': ('skip_job', (name, data)),
                 'reload': ('reload', (schedule,)),
                 'list': ('list', (where,)),
                 'save_schedule': ('save_schedule', ()),
                 'get_next_fire_time': ('get_next_fire_time', (name,))}

        # Call the appropriate schedule function
        try:
            alias, params = funcs.get(func)
            getattr(self.schedule, alias)(*params)
        except TypeError:
            log.error('Function "%s" is unavailable in salt.utils.scheduler', func)

    def manage_beacons(self, tag, data):
        '''
        Manage Beacons
        '''
        func = data.get('func', None)
        name = data.get('name', None)
        beacon_data = data.get('beacon_data', None)
        include_pillar = data.get('include_pillar', None)
        include_opts = data.get('include_opts', None)

        # Map request names to (beacon method, argument tuple)
        funcs = {'add': ('add_beacon', (name, beacon_data)),
                 'modify': ('modify_beacon', (name, beacon_data)),
                 'delete': ('delete_beacon', (name,)),
                 'enable': ('enable_beacons', ()),
                 'disable': ('disable_beacons', ()),
                 'enable_beacon': ('enable_beacon', (name,)),
                 'disable_beacon': ('disable_beacon', (name,)),
                 'list': ('list_beacons', (include_opts, include_pillar)),
                 'list_available': ('list_available_beacons', ()),
                 'validate_beacon': ('validate_beacon', (name, beacon_data)),
                 'reset': ('reset', ())}

        # Call the appropriate beacon function
        try:
            alias, params = funcs.get(func)
            getattr(self.beacons, alias)(*params)
        except AttributeError:
            log.error('Function "%s" is unavailable in salt.beacons', func)
        except TypeError as exc:
            log.info(
                'Failed to handle %s with data(%s). Error: %s',
                tag, data, exc,
                exc_info_on_loglevel=logging.DEBUG
            )

    def environ_setenv(self, tag, data):
        '''
        Set the salt-minion main process environment according to
        the data contained in the minion event data
        '''
        environ = data.get('environ', None)
        if environ is None:
            return False
        false_unsets = data.get('false_unsets', False)
        clear_all = data.get('clear_all', False)
        import salt.modules.environ as mod_environ
        return mod_environ.setenv(environ, false_unsets, clear_all)

    def _pre_tune(self):
        '''
        Set the minion running flag and issue the appropriate warnings if
        the minion cannot be started or is already running
        '''
        if self._running is None:
            self._running = True
        elif self._running is False:
            log.error(
                'This %s was scheduled to stop. Not running %s.tune_in()',
                self.__class__.__name__, self.__class__.__name__
            )
            return
        elif self._running is True:
            log.error(
                'This %s is already running. Not running %s.tune_in()',
                self.__class__.__name__, self.__class__.__name__
            )
            return

        try:
            log.info(
                '%s is starting as user \'%s\'',
                self.__class__.__name__, salt.utils.user.get_user()
            )
        except Exception as err:
            # Only windows is allowed to fail here. See #3189. Log as debug in
            # that case. Else, error.
            log.log(
                salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR,
                'Failed to get the user who is starting %s',
                self.__class__.__name__,
                exc_info=err
            )

    def _mine_send(self, tag, data):
        '''
        Send mine data to the master
        '''
        channel = salt.transport.client.ReqChannel.factory(self.opts)
        data['tok'] = self.tok
        try:
            ret = channel.send(data)
            return ret
        except SaltReqTimeoutError:
            log.warning('Unable to send mine data to master.')
            return None
        finally:
            channel.close()

    def _handle_tag_module_refresh(self, tag, data):
        '''
        Handle a module_refresh event
        '''
        self.module_refresh(
            force_refresh=data.get('force_refresh', False),
            notify=data.get('notify', False)
        )

    @tornado.gen.coroutine
    def _handle_tag_pillar_refresh(self, tag, data):
        '''
        Handle a pillar_refresh event
        '''
        yield self.pillar_refresh(
            force_refresh=data.get('force_refresh', False),
            notify=data.get('notify', False)
        )

    def _handle_tag_beacons_refresh(self, tag, data):
        '''
        Handle a beacon_refresh event
        '''
        self.beacons_refresh()

    def _handle_tag_matchers_refresh(self, tag, data):
        '''
        Handle a matchers_refresh event
        '''
        self.matchers_refresh()

    def _handle_tag_manage_schedule(self, tag, data):
        '''
        Handle a manage_schedule event
        '''
        self.manage_schedule(tag, data)

    def _handle_tag_manage_beacons(self, tag, data):
        '''
        Handle a manage_beacons event
        '''
        self.manage_beacons(tag, data)

    def _handle_tag_grains_refresh(self, tag, data):
        '''
        Handle a grains_refresh event
        '''
        if (data.get('force_refresh', False) or
                self.grains_cache != self.opts['grains']):
            self.pillar_refresh(force_refresh=True)
            self.grains_cache = self.opts['grains']

    def _handle_tag_environ_setenv(self, tag, data):
        '''
        Handle a environ_setenv event
        '''
        self.environ_setenv(tag, data)

    def _handle_tag_minion_mine(self, tag, data):
        '''
        Handle a _minion_mine event
        '''
        self._mine_send(tag, data)

    def _handle_tag_fire_master(self, tag, data):
        '''
        Handle a fire_master event
        '''
        if self.connected:
            log.debug('Forwarding master event tag=%s', data['tag'])
            self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])

    def _handle_tag_master_disconnected_failback(self, tag, data):
        '''
        Handle a master_disconnected_failback event
        '''
        # if the master disconnect event is for a different master, raise an exception
        if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']:
            # not mine master, ignore
            return
        if tag.startswith(master_event(type='failback')):
            # if the master failback event is not for the top master, raise an exception
            if data['master'] != self.opts['master_list'][0]:
                raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format(
                    data['master'], self.opts['master']))
            # if the master failback event is for the current master, raise an exception
            elif data['master'] == self.opts['master'][0]:
                raise SaltException('Already connected to \'{0}\''.format(data['master']))

        if self.connected:
            # we are not connected anymore
            self.connected = False
            log.info('Connection to master %s lost', self.opts['master'])

            # we can't use the config default here because the default '0' value is overloaded
            # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up
            # these jobs
            master_alive_interval = self.opts['master_alive_interval'] or 60

            if self.opts['master_type'] != 'failover':
                # modify the scheduled job to fire on reconnect
                if self.opts['transport'] != 'tcp':
                    schedule = {
                        'function': 'status.master',
                        'seconds': master_alive_interval,
                        'jid_include': True,
                        'maxrunning': 1,
                        'return_job': False,
                        'kwargs': {'master': self.opts['master'],
                                   'connected': False}
                    }
                    self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
                                             schedule=schedule)
            else:
                # delete the scheduled job to don't interfere with the failover process
                if self.opts['transport'] != 'tcp':
                    self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']),
                                             persist=True)

                log.info('Trying to tune in to next master from master-list')

                if hasattr(self, 'pub_channel'):
                    self.pub_channel.on_recv(None)
                    if hasattr(self.pub_channel, 'auth'):
                        self.pub_channel.auth.invalidate()
                    if hasattr(self.pub_channel, 'close'):
                        self.pub_channel.close()
                    del self.pub_channel

                # if eval_master finds a new master for us, self.connected
                # will be True again on successful master authentication
                try:
                    master, self.pub_channel = yield self.eval_master(
                        opts=self.opts,
                        failed=True,
                        failback=tag.startswith(master_event(type='failback')))
                except SaltClientError:
                    pass

                if self.connected:
                    self.opts['master'] = master

                    # re-init the subsystems to work with the new master
                    log.info(
                        'Re-initialising subsystems for new master %s',
                        self.opts['master']
                    )

                    # put the current schedule into the new loaders
                    self.opts['schedule'] = self.schedule.option('schedule')
                    self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
                    # make the schedule to use the new 'functions' loader
                    self.schedule.functions = self.functions
                    self.pub_channel.on_recv(self._handle_payload)
                    self._fire_master_minion_start()
                    log.info('Minion is ready to receive requests!')

                    # update scheduled job to run with the new master addr
                    if self.opts['transport'] != 'tcp':
                        schedule = {
                            'function': 'status.master',
                            'seconds': master_alive_interval,
                            'jid_include': True,
                            'maxrunning': 1,
                            'return_job': False,
                            'kwargs': {'master': self.opts['master'],
                                       'connected': True}
                        }
                        self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
                                                 schedule=schedule)

                        if self.opts['master_failback'] and 'master_list' in self.opts:
                            if self.opts['master'] != self.opts['master_list'][0]:
                                schedule = {
                                    'function': 'status.ping_master',
                                    'seconds': self.opts['master_failback_interval'],
                                    'jid_include': True,
                                    'maxrunning': 1,
                                    'return_job': False,
                                    'kwargs': {'master': self.opts['master_list'][0]}
                                }
                                self.schedule.modify_job(name=master_event(type='failback'),
                                                         schedule=schedule)
                            else:
                                self.schedule.delete_job(name=master_event(type='failback'), persist=True)
                else:
                    self.restart = True
                    self.io_loop.stop()

    def _handle_tag_master_connected(self, tag, data):
        '''
        Handle a master_connected event
        '''
        # handle this event only once. otherwise it will pollute the log
        # also if master type is failover all the reconnection work is done
        # by `disconnected` event handler and this event must never happen,
        # anyway check it to be sure
        if not self.connected and self.opts['master_type'] != 'failover':
            log.info('Connection to master %s re-established', self.opts['master'])
            self.connected = True
            # modify the __master_alive job to only fire,
            # if the connection is lost again
            if self.opts['transport'] != 'tcp':
                if self.opts['master_alive_interval'] > 0:
                    schedule = {
                        'function': 'status.master',
                        'seconds': self.opts['master_alive_interval'],
                        'jid_include': True,
                        'maxrunning': 1,
                        'return_job': False,
                        'kwargs': {'master': self.opts['master'],
                                   'connected': True}
                    }
                    self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
                                             schedule=schedule)
                else:
                    self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']),
                                             persist=True)

    def _handle_tag_schedule_return(self, tag, data):
        '''
        Handle a _schedule_return event
        '''
        # reporting current connection with master
        if data['schedule'].startswith(master_event(type='alive', master='')):
            if data['return']:
                log.debug(
                    'Connected to master %s',
                    data['schedule'].split(master_event(type='alive', master=''))[1]
                )
        self._return_pub(data, ret_cmd='_return', sync=False)

    def _handle_tag_salt_error(self, tag, data):
        '''
        Handle a _salt_error event
        '''
        if self.connected:
            log.debug('Forwarding salt error event tag=%s', tag)
            self._fire_master(data, tag)

    def _handle_tag_salt_auth_creds(self, tag, data):
        '''
Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def handle_event(self, package): ''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. 
''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. 
Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion.pillar_refresh
python
def pillar_refresh(self, force_refresh=False, notify=False): ''' Refresh the pillar ''' if self.connected: log.debug('Refreshing pillar. Notify: %s', notify) async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ) try: self.opts['pillar'] = yield async_pillar.compile_pillar() if notify: evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE) except SaltClientError: # Do not exit if a pillar refresh fails. log.error('Pillar data could not be refreshed. ' 'One or more masters may be down!') finally: async_pillar.destroy() self.module_refresh(force_refresh, notify) self.matchers_refresh() self.beacons_refresh()
Refresh the pillar
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2233-L2259
[ "def get_event(\n node, sock_dir=None, transport='zeromq',\n opts=None, listen=True, io_loop=None, keep_loop=False, raise_errors=False):\n '''\n Return an event object suitable for the named transport\n\n :param IOLoop io_loop: Pass in an io_loop if you want asynchronous\n operation for obtaining events. Eg use of\n set_event_handler() API. Otherwise, operation\n will be synchronous.\n '''\n sock_dir = sock_dir or opts['sock_dir']\n # TODO: AIO core is separate from transport\n if node == 'master':\n return MasterEvent(sock_dir,\n opts,\n listen=listen,\n io_loop=io_loop,\n keep_loop=keep_loop,\n raise_errors=raise_errors)\n return SaltEvent(node,\n sock_dir,\n opts,\n listen=listen,\n io_loop=io_loop,\n keep_loop=keep_loop,\n raise_errors=raise_errors)\n", "def get_async_pillar(opts, grains, minion_id, saltenv=None, ext=None, funcs=None,\n pillar_override=None, pillarenv=None,\n extra_minion_data=None):\n '''\n Return the correct pillar driver based on the file_client option\n '''\n file_client = opts['file_client']\n if opts.get('master_type') == 'disable' and file_client == 'remote':\n file_client = 'local'\n ptype = {\n 'remote': AsyncRemotePillar,\n 'local': AsyncPillar,\n }.get(file_client, AsyncPillar)\n return ptype(opts, grains, minion_id, saltenv, ext, functions=funcs,\n pillar_override=pillar_override, pillarenv=pillarenv,\n extra_minion_data=extra_minion_data)\n", "def fire_event(self, data, tag, timeout=1000):\n '''\n Send a single event into the publisher with payload dict \"data\" and\n event identifier \"tag\"\n\n The default is 1000 ms\n '''\n if not six.text_type(tag): # no empty tags allowed\n raise ValueError('Empty tag.')\n\n if not isinstance(data, MutableMapping): # data must be dict\n raise ValueError(\n 'Dict object expected, not \\'{0}\\'.'.format(data)\n )\n\n if not self.cpush:\n if timeout is not None:\n timeout_s = float(timeout) / 1000\n else:\n timeout_s = None\n if not self.connect_pull(timeout=timeout_s):\n return False\n\n 
data['_stamp'] = datetime.datetime.utcnow().isoformat()\n\n tagend = TAGEND\n if six.PY2:\n dump_data = self.serial.dumps(data)\n else:\n # Since the pack / unpack logic here is for local events only,\n # it is safe to change the wire protocol. The mechanism\n # that sends events from minion to master is outside this\n # file.\n dump_data = self.serial.dumps(data, use_bin_type=True)\n\n serialized_data = salt.utils.dicttrim.trim_dict(\n dump_data,\n self.opts['max_event_size'],\n is_msgpacked=True,\n use_bin_type=six.PY3\n )\n log.debug('Sending event: tag = %s; data = %s', tag, data)\n event = b''.join([\n salt.utils.stringutils.to_bytes(tag),\n salt.utils.stringutils.to_bytes(tagend),\n serialized_data])\n msg = salt.utils.stringutils.to_bytes(event, 'utf-8')\n if self._run_io_loop_sync:\n with salt.utils.asynchronous.current_ioloop(self.io_loop):\n try:\n self.io_loop.run_sync(lambda: self.pusher.send(msg))\n except Exception as ex:\n log.debug(ex)\n raise\n else:\n self.io_loop.spawn_callback(self.pusher.send, msg)\n return True\n", "def module_refresh(self, force_refresh=False, notify=False):\n '''\n Refresh the functions and returners.\n '''\n log.debug('Refreshing modules. Notify=%s', notify)\n self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify)\n\n self.schedule.functions = self.functions\n self.schedule.returners = self.returners\n", "def beacons_refresh(self):\n '''\n Refresh the functions and returners.\n '''\n log.debug('Refreshing beacons.')\n self.beacons = salt.beacons.Beacon(self.opts, self.functions)\n", "def matchers_refresh(self):\n '''\n Refresh the matchers\n '''\n log.debug('Refreshing matchers.')\n self.matchers = salt.loader.matchers(self.opts)\n" ]
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, 
self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' 
mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! 
if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) executors = salt.loader.executors(opts, functions, proxy=proxy) if opt_in: self.opts = opts return functions, returners, errors, executors def _send_req_sync(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.ReqChannel.factory(self.opts) try: return channel.send(load, timeout=timeout) finally: channel.close() @tornado.gen.coroutine def _send_req_async(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.AsyncReqChannel.factory(self.opts) try: ret = yield channel.send(load, timeout=timeout) raise tornado.gen.Return(ret) finally: channel.close() def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. ''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return if sync: try: self._send_req_sync(load, timeout) except salt.exceptions.SaltReqTimeoutError: log.info('fire_master failed: master could not be contacted. 
Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return False except Exception: log.info('fire_master failed: %s', traceback.format_exc()) return False else: if timeout_handler is None: def handle_timeout(*_): log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return True timeout_handler = handle_timeout with tornado.stack_context.ExceptionStackContext(timeout_handler): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # Ensure payload is unicode. Disregard failure to decode binary blobs. if six.PY2: data = salt.utils.data.decode(data, keep=True) if 'user' in data: log.info( 'User %s Executing command %s with jid %s', data['user'], data['fun'], data['jid'] ) else: log.info( 'Executing command %s with jid %s', data['fun'], data['jid'] ) log.debug('Command details %s', data) # Don't duplicate jobs log.trace('Started JIDs: %s', self.jid_queue) if self.jid_queue is not None: if data['jid'] in self.jid_queue: return else: self.jid_queue.append(data['jid']) if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']: self.jid_queue.pop(0) if isinstance(data['fun'], six.string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners process_count_max = self.opts.get('process_count_max') process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs') if process_count_max > 0: process_count = len(salt.utils.minion.running(self.opts)) while 
process_count >= process_count_max: log.warning('Maximum number of processes (%s) reached while ' 'executing jid %s, waiting %s seconds...', process_count_max, data['jid'], process_count_max_sleep_secs) yield tornado.gen.sleep(process_count_max_sleep_secs) process_count = len(salt.utils.minion.running(self.opts)) # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. instance = self multiprocessing_enabled = self.opts.get('multiprocessing', True) if multiprocessing_enabled: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None with default_signals(signal.SIGINT, signal.SIGTERM): process = SignalHandlingMultiprocessingProcess( target=self._target, args=(instance, self.opts, data, self.connected) ) else: process = threading.Thread( target=self._target, args=(instance, self.opts, data, self.connected), name=data['jid'] ) if multiprocessing_enabled: with default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers process.start() else: process.start() # TODO: remove the windows specific check? 
if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading 
target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. 
Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
try: func_result = all(return_data.get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret['retcode'] = retcode ret['success'] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = 'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( 'A command in \'%s\' had a problem: %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( 'Problem executing \'%s\': %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing \'{0}\': {1}'.format( function_name, exc ) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format( function_name, exc, func.__doc__ or '' ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string(function_name) ret['return'] = docs else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = 
function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: \'{0}\''.format( minion_instance.function_errors[mod_name] ) ret['success'] = False ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. Ignoring.') if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) # Add default returners from minion config # Should have been coverted to comma-delimited string already if isinstance(opts.get('return'), six.string_types): if data['ret']: data['ret'] = ','.join((data['ret'], opts['return'])) else: data['ret'] = opts['return'] log.debug('minion return: %s', ret) # TODO: make a list? Seems odd to split it this late :/ if data['ret'] and isinstance(data['ret'], six.string_types): if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: returner_str = '{0}.returner'.format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string(returner_str) log.error( 'Returner %s could not be loaded: %s', returner_str, returner_err ) except Exception as exc: log.exception( 'The return failed for job %s: %s', data['jid'], exc ) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. 
        '''
        fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
        if opts['multiprocessing'] and not salt.utils.platform.is_windows():
            # Shutdown the multiprocessing before daemonizing
            salt.log.setup.shutdown_multiprocessing_logging()

            salt.utils.process.daemonize_if(opts)

            # Reconfigure multiprocessing logging after daemonizing
            salt.log.setup.setup_multiprocessing_logging()

        salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))

        # Record this job's pid in the proc dir so it shows as running
        sdata = {'pid': os.getpid()}
        sdata.update(data)
        log.info('Starting a new job with PID %s', sdata['pid'])
        with salt.utils.files.fopen(fn_, 'w+b') as fp_:
            fp_.write(minion_instance.serial.dumps(sdata))

        # multifunc_ordered keys the per-function results by positional
        # index (lists); otherwise they are keyed by function name (dicts)
        multifunc_ordered = opts.get('multifunc_ordered', False)
        num_funcs = len(data['fun'])
        if multifunc_ordered:
            ret = {
                'return': [None] * num_funcs,
                'retcode': [None] * num_funcs,
                'success': [False] * num_funcs
            }
        else:
            ret = {
                'return': {},
                'retcode': {},
                'success': {}
            }

        for ind in range(0, num_funcs):
            if not multifunc_ordered:
                ret['success'][data['fun'][ind]] = False
            try:
                minion_blackout_violation = False
                if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
                    whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
                    # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                    if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
                        minion_blackout_violation = True
                elif minion_instance.opts['grains'].get('minion_blackout', False):
                    whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
                    if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
                        minion_blackout_violation = True
                if minion_blackout_violation:
                    raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                              'to False in pillar or grains to resume operations. Only '
                                              'saltutil.refresh_pillar allowed in blackout mode.')

                func = minion_instance.functions[data['fun'][ind]]

                args, kwargs = load_args_and_kwargs(
                    func,
                    data['arg'][ind],
                    data)
                minion_instance.functions.pack['__context__']['retcode'] = 0
                key = ind if multifunc_ordered else data['fun'][ind]
                ret['return'][key] = func(*args, **kwargs)
                retcode = minion_instance.functions.pack['__context__'].get(
                    'retcode',
                    0
                )
                if retcode == 0:
                    # No nonzero retcode in __context__ dunder. Check if return
                    # is a dictionary with a "result" or "success" key.
                    try:
                        func_result = all(ret['return'][key].get(x, True)
                                          for x in ('result', 'success'))
                    except Exception:
                        # return data is not a dict
                        func_result = True
                    if not func_result:
                        retcode = 1

                ret['retcode'][key] = retcode
                ret['success'][key] = retcode == 0
            except Exception as exc:
                # One failing function must not abort the remaining ones;
                # store the traceback as that function's return value.
                trb = traceback.format_exc()
                log.warning('The minion function caused an exception: %s', exc)
                if multifunc_ordered:
                    ret['return'][ind] = trb
                else:
                    ret['return'][data['fun'][ind]] = trb
        ret['jid'] = data['jid']
        ret['fun'] = data['fun']
        ret['fun_args'] = data['arg']
        if 'metadata' in data:
            ret['metadata'] = data['metadata']
        if minion_instance.connected:
            minion_instance._return_pub(
                ret,
                timeout=minion_instance._return_retry_timer()
            )
        if data['ret']:
            if 'ret_config' in data:
                ret['ret_config'] = data['ret_config']
            if 'ret_kwargs' in data:
                ret['ret_kwargs'] = data['ret_kwargs']
            for returner in set(data['ret'].split(',')):
                ret['id'] = opts['id']
                try:
                    minion_instance.returners['{0}.returner'.format(
                        returner
                    )](ret)
                except Exception as exc:
                    log.error(
                        'The return failed for job %s: %s',
                        data['jid'], exc
                    )

    def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True):
        '''
        Return the data from the executed command to the master server
        '''
        jid = ret.get('jid', ret.get('__jid__'))
        fun = ret.get('fun', ret.get('__fun__'))
        if self.opts['multiprocessing']:
            # Job finished: remove its entry from the proc dir
            fn_ = os.path.join(self.proc_dir, jid)
            if os.path.isfile(fn_):
                try:
                    os.remove(fn_)
                except (OSError, IOError):
                    # The file
is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled if ret['jid'] == 'req': ret['jid'] = salt.utils.jid.gen_jid(self.opts) salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) if not self.opts['pub_ret']: return '' def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. 
If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' if not isinstance(rets, list): rets = [rets] jids = {} for ret in rets: jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) load = jids.setdefault(jid, {}) if ret_cmd == '_syndic_return': if not load: load.update({'id': self.opts['id'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__'), 'return': {}}) if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load.update({'id': self.opts['id']}) for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. 
This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled salt.utils.minion.cache_jobs(self.opts, load['jid'], ret) load = {'cmd': ret_cmd, 'load': list(six.itervalues(jids))} def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' 
                )
            else:
                data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
                if self.opts['startup_states'] == 'sls':
                    data['fun'] = 'state.sls'
                    data['arg'] = [self.opts['sls_list']]
                elif self.opts['startup_states'] == 'top':
                    data['fun'] = 'state.top'
                    data['arg'] = [self.opts['top_file']]
                else:
                    data['fun'] = 'state.highstate'
                    data['arg'] = []
                # Run the startup state like a normal published job
                self._handle_decoded_payload(data)

    def _refresh_grains_watcher(self, refresh_interval_in_minutes):
        '''
        Create a loop that will fire a pillar refresh to inform a master about
        a change in the grains of this minion

        :param refresh_interval_in_minutes: interval between refreshes
        :return: None
        '''
        # Only install the scheduled job once
        if '__update_grains' not in self.opts.get('schedule', {}):
            if 'schedule' not in self.opts:
                self.opts['schedule'] = {}
            self.opts['schedule'].update({
                '__update_grains': {
                    'function': 'event.fire',
                    'args': [{}, 'grains_refresh'],
                    'minutes': refresh_interval_in_minutes
                }
            })

    def _fire_master_minion_start(self):
        # Send an event to the master that the minion is live
        if self.opts['enable_legacy_startup_events']:
            # Old style event. Defaults to False in Sodium release.
            self._fire_master(
                'Minion {0} started at {1}'.format(
                    self.opts['id'],
                    time.asctime()
                ),
                'minion_start'
            )
        # send name spaced event
        self._fire_master(
            'Minion {0} started at {1}'.format(
                self.opts['id'],
                time.asctime()
            ),
            tagify([self.opts['id'], 'start'], 'minion'),
        )

    def module_refresh(self, force_refresh=False, notify=False):
        '''
        Refresh the functions and returners.
        '''
        log.debug('Refreshing modules. Notify=%s', notify)
        self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify)
        # keep the scheduler pointed at the freshly loaded modules
        self.schedule.functions = self.functions
        self.schedule.returners = self.returners

    def beacons_refresh(self):
        '''
        Refresh the beacons.
        '''
        log.debug('Refreshing beacons.')
        self.beacons = salt.beacons.Beacon(self.opts, self.functions)

    def matchers_refresh(self):
        '''
        Refresh the matchers
        '''
        log.debug('Refreshing matchers.')
        self.matchers = salt.loader.matchers(self.opts)

    # TODO: only allow one future in flight at a time?
    @tornado.gen.coroutine
    def manage_schedule(self, tag, data):
        '''
        Dispatch a manage_schedule event to the matching Schedule method.
        '''
        func = data.get('func', None)
        name = data.get('name', None)
        schedule = data.get('schedule', None)
        where = data.get('where', None)
        persist = data.get('persist', None)

        # Map the requested operation to (Schedule method name, args)
        funcs = {'delete': ('delete_job', (name, persist)),
                 'add': ('add_job', (schedule, persist)),
                 'modify': ('modify_job', (name, schedule, persist)),
                 'enable': ('enable_schedule', ()),
                 'disable': ('disable_schedule', ()),
                 'enable_job': ('enable_job', (name, persist)),
                 'disable_job': ('disable_job', (name, persist)),
                 'postpone_job': ('postpone_job', (name, data)),
                 'skip_job': ('skip_job', (name, data)),
                 'reload': ('reload', (schedule,)),
                 'list': ('list', (where,)),
                 'save_schedule': ('save_schedule', ()),
                 'get_next_fire_time': ('get_next_fire_time', (name,))}

        # Call the appropriate schedule function
        try:
            alias, params = funcs.get(func)
            getattr(self.schedule, alias)(*params)
        except TypeError:
            # funcs.get(func) returned None for an unknown func
            log.error('Function "%s" is unavailable in salt.utils.scheduler', func)

    def manage_beacons(self, tag, data):
        '''
        Manage Beacons
        '''
        func = data.get('func', None)
        name = data.get('name', None)
        beacon_data = data.get('beacon_data', None)
        include_pillar = data.get('include_pillar', None)
        include_opts = data.get('include_opts', None)

        # Map the requested operation to (Beacon method name, args)
        funcs = {'add': ('add_beacon', (name, beacon_data)),
                 'modify': ('modify_beacon', (name, beacon_data)),
                 'delete': ('delete_beacon', (name,)),
                 'enable': ('enable_beacons', ()),
                 'disable': ('disable_beacons', ()),
                 'enable_beacon': ('enable_beacon', (name,)),
                 'disable_beacon': ('disable_beacon', (name,)),
                 'list': ('list_beacons', (include_opts, include_pillar)),
                 'list_available': ('list_available_beacons', ()),
'validate_beacon': ('validate_beacon', (name, beacon_data)), 'reset': ('reset', ())} # Call the appropriate beacon function try: alias, params = funcs.get(func) getattr(self.beacons, alias)(*params) except AttributeError: log.error('Function "%s" is unavailable in salt.beacons', func) except TypeError as exc: log.info( 'Failed to handle %s with data(%s). Error: %s', tag, data, exc, exc_info_on_loglevel=logging.DEBUG ) def environ_setenv(self, tag, data): ''' Set the salt-minion main process environment according to the data contained in the minion event data ''' environ = data.get('environ', None) if environ is None: return False false_unsets = data.get('false_unsets', False) clear_all = data.get('clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all) def _pre_tune(self): ''' Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running ''' if self._running is None: self._running = True elif self._running is False: log.error( 'This %s was scheduled to stop. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return elif self._running is True: log.error( 'This %s is already running. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return try: log.info( '%s is starting as user \'%s\'', self.__class__.__name__, salt.utils.user.get_user() ) except Exception as err: # Only windows is allowed to fail here. See #3189. Log as debug in # that case. Else, error. 
            log.log(
                # Only windows is allowed to fail here (see #3189): debug
                # there, error everywhere else.
                salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR,
                'Failed to get the user who is starting %s',
                self.__class__.__name__,
                exc_info=err
            )

    def _mine_send(self, tag, data):
        '''
        Send mine data to the master
        '''
        channel = salt.transport.client.ReqChannel.factory(self.opts)
        data['tok'] = self.tok
        try:
            ret = channel.send(data)
            return ret
        except SaltReqTimeoutError:
            log.warning('Unable to send mine data to master.')
            return None
        finally:
            # always release the request channel
            channel.close()

    def _handle_tag_module_refresh(self, tag, data):
        '''
        Handle a module_refresh event
        '''
        self.module_refresh(
            force_refresh=data.get('force_refresh', False),
            notify=data.get('notify', False)
        )

    @tornado.gen.coroutine
    def _handle_tag_pillar_refresh(self, tag, data):
        '''
        Handle a pillar_refresh event
        '''
        yield self.pillar_refresh(
            force_refresh=data.get('force_refresh', False),
            notify=data.get('notify', False)
        )

    def _handle_tag_beacons_refresh(self, tag, data):
        '''
        Handle a beacons_refresh event
        '''
        self.beacons_refresh()

    def _handle_tag_matchers_refresh(self, tag, data):
        '''
        Handle a matchers_refresh event
        '''
        self.matchers_refresh()

    def _handle_tag_manage_schedule(self, tag, data):
        '''
        Handle a manage_schedule event
        '''
        self.manage_schedule(tag, data)

    def _handle_tag_manage_beacons(self, tag, data):
        '''
        Handle a manage_beacons event
        '''
        self.manage_beacons(tag, data)

    def _handle_tag_grains_refresh(self, tag, data):
        '''
        Handle a grains_refresh event
        '''
        # only refresh if forced or if the grains actually changed
        if (data.get('force_refresh', False) or
                self.grains_cache != self.opts['grains']):
            self.pillar_refresh(force_refresh=True)
            self.grains_cache = self.opts['grains']

    def _handle_tag_environ_setenv(self, tag, data):
        '''
        Handle a environ_setenv event
        '''
        self.environ_setenv(tag, data)

    def _handle_tag_minion_mine(self, tag, data):
        '''
        Handle a _minion_mine event
        '''
        self._mine_send(tag, data)

    def _handle_tag_fire_master(self, tag, data):
        '''
        Handle a fire_master event
        '''
        if self.connected:
            log.debug('Forwarding master event tag=%s', data['tag'])
self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) def _handle_tag_master_disconnected_failback(self, tag, data): ''' Handle a master_disconnected_failback event ''' # if the master disconnect event is for a different master, raise an exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 
'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = 
True self.io_loop.stop() def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) def _handle_tag_salt_auth_creds(self, tag, data): ''' Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def handle_event(self, package): 
''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. ''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion.manage_schedule
python
def manage_schedule(self, tag, data):
    '''
    Handle a ``manage_schedule`` event: dispatch the requested scheduler
    operation (add/delete/modify/enable/disable/list/etc.) to
    ``self.schedule``.

    :param tag: the event tag (unused; kept for the event-handler signature)
    :param data: event payload. ``func`` names the operation; the remaining
        keys (``name``, ``schedule``, ``where``, ``persist``) supply its
        arguments.
    '''
    func = data.get('func', None)
    name = data.get('name', None)
    schedule = data.get('schedule', None)
    where = data.get('where', None)
    persist = data.get('persist', None)

    # Map each supported operation to (Schedule method name, positional args)
    funcs = {'delete': ('delete_job', (name, persist)),
             'add': ('add_job', (schedule, persist)),
             'modify': ('modify_job', (name, schedule, persist)),
             'enable': ('enable_schedule', ()),
             'disable': ('disable_schedule', ()),
             'enable_job': ('enable_job', (name, persist)),
             'disable_job': ('disable_job', (name, persist)),
             'postpone_job': ('postpone_job', (name, data)),
             'skip_job': ('skip_job', (name, data)),
             'reload': ('reload', (schedule,)),
             'list': ('list', (where,)),
             'save_schedule': ('save_schedule', ()),
             'get_next_fire_time': ('get_next_fire_time', (name,))}

    # Look the operation up explicitly rather than unpacking the result of
    # funcs.get() inside a broad `except TypeError`: the old form also
    # swallowed genuine TypeErrors raised by the schedule method itself
    # (e.g. wrong argument counts), logging them as "unavailable" and
    # masking real bugs.
    try:
        alias, params = funcs[func]
    except KeyError:
        log.error('Function "%s" is unavailable in salt.utils.scheduler', func)
        return
    getattr(self.schedule, alias)(*params)
Dispatch a schedule-management operation (add, delete, modify, enable, disable, list, etc.) from the event payload to the minion's scheduler.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2261-L2293
null
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, 
self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' 
mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! 
if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) executors = salt.loader.executors(opts, functions, proxy=proxy) if opt_in: self.opts = opts return functions, returners, errors, executors def _send_req_sync(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.ReqChannel.factory(self.opts) try: return channel.send(load, timeout=timeout) finally: channel.close() @tornado.gen.coroutine def _send_req_async(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.AsyncReqChannel.factory(self.opts) try: ret = yield channel.send(load, timeout=timeout) raise tornado.gen.Return(ret) finally: channel.close() def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. ''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return if sync: try: self._send_req_sync(load, timeout) except salt.exceptions.SaltReqTimeoutError: log.info('fire_master failed: master could not be contacted. 
Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return False except Exception: log.info('fire_master failed: %s', traceback.format_exc()) return False else: if timeout_handler is None: def handle_timeout(*_): log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return True timeout_handler = handle_timeout with tornado.stack_context.ExceptionStackContext(timeout_handler): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # Ensure payload is unicode. Disregard failure to decode binary blobs. if six.PY2: data = salt.utils.data.decode(data, keep=True) if 'user' in data: log.info( 'User %s Executing command %s with jid %s', data['user'], data['fun'], data['jid'] ) else: log.info( 'Executing command %s with jid %s', data['fun'], data['jid'] ) log.debug('Command details %s', data) # Don't duplicate jobs log.trace('Started JIDs: %s', self.jid_queue) if self.jid_queue is not None: if data['jid'] in self.jid_queue: return else: self.jid_queue.append(data['jid']) if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']: self.jid_queue.pop(0) if isinstance(data['fun'], six.string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners process_count_max = self.opts.get('process_count_max') process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs') if process_count_max > 0: process_count = len(salt.utils.minion.running(self.opts)) while 
process_count >= process_count_max: log.warning('Maximum number of processes (%s) reached while ' 'executing jid %s, waiting %s seconds...', process_count_max, data['jid'], process_count_max_sleep_secs) yield tornado.gen.sleep(process_count_max_sleep_secs) process_count = len(salt.utils.minion.running(self.opts)) # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. instance = self multiprocessing_enabled = self.opts.get('multiprocessing', True) if multiprocessing_enabled: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None with default_signals(signal.SIGINT, signal.SIGTERM): process = SignalHandlingMultiprocessingProcess( target=self._target, args=(instance, self.opts, data, self.connected) ) else: process = threading.Thread( target=self._target, args=(instance, self.opts, data, self.connected), name=data['jid'] ) if multiprocessing_enabled: with default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers process.start() else: process.start() # TODO: remove the windows specific check? 
if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading 
target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. 
Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
try: func_result = all(return_data.get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret['retcode'] = retcode ret['success'] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = 'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( 'A command in \'%s\' had a problem: %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( 'Problem executing \'%s\': %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing \'{0}\': {1}'.format( function_name, exc ) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format( function_name, exc, func.__doc__ or '' ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string(function_name) ret['return'] = docs else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = 
function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: \'{0}\''.format( minion_instance.function_errors[mod_name] ) ret['success'] = False ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. Ignoring.') if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) # Add default returners from minion config # Should have been coverted to comma-delimited string already if isinstance(opts.get('return'), six.string_types): if data['ret']: data['ret'] = ','.join((data['ret'], opts['return'])) else: data['ret'] = opts['return'] log.debug('minion return: %s', ret) # TODO: make a list? Seems odd to split it this late :/ if data['ret'] and isinstance(data['ret'], six.string_types): if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: returner_str = '{0}.returner'.format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string(returner_str) log.error( 'Returner %s could not be loaded: %s', returner_str, returner_err ) except Exception as exc: log.exception( 'The return failed for job %s: %s', data['jid'], exc ) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. 
''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID %s', sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) multifunc_ordered = opts.get('multifunc_ordered', False) num_funcs = len(data['fun']) if multifunc_ordered: ret = { 'return': [None] * num_funcs, 'retcode': [None] * num_funcs, 'success': [False] * num_funcs } else: ret = { 'return': {}, 'retcode': {}, 'success': {} } for ind in range(0, num_funcs): if not multifunc_ordered: ret['success'][data['fun'][ind]] = False try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True elif minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. 
Only ' 'saltutil.refresh_pillar allowed in blackout mode.') func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) minion_instance.functions.pack['__context__']['retcode'] = 0 key = ind if multifunc_ordered else data['fun'][ind] ret['return'][key] = func(*args, **kwargs) retcode = minion_instance.functions.pack['__context__'].get( 'retcode', 0 ) if retcode == 0: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. try: func_result = all(ret['return'][key].get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = 1 ret['retcode'][key] = retcode ret['success'][key] = retcode == 0 except Exception as exc: trb = traceback.format_exc() log.warning('The minion function caused an exception: %s', exc) if multifunc_ordered: ret['return'][ind] = trb else: ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job %s: %s', data['jid'], exc ) def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file 
is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled if ret['jid'] == 'req': ret['jid'] = salt.utils.jid.gen_jid(self.opts) salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) if not self.opts['pub_ret']: return '' def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. 
If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' if not isinstance(rets, list): rets = [rets] jids = {} for ret in rets: jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) load = jids.setdefault(jid, {}) if ret_cmd == '_syndic_return': if not load: load.update({'id': self.opts['id'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__'), 'return': {}}) if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load.update({'id': self.opts['id']}) for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. 
This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled salt.utils.minion.cache_jobs(self.opts, load['jid'], ret) load = {'cmd': ret_cmd, 'load': list(six.itervalues(jids))} def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' 
) else: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data) def _refresh_grains_watcher(self, refresh_interval_in_minutes): ''' Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: :return: None ''' if '__update_grains' not in self.opts.get('schedule', {}): if 'schedule' not in self.opts: self.opts['schedule'] = {} self.opts['schedule'].update({ '__update_grains': { 'function': 'event.fire', 'args': [{}, 'grains_refresh'], 'minutes': refresh_interval_in_minutes } }) def _fire_master_minion_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to False in Sodium release. self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'minion_start' ) # send name spaced event self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'minion'), ) def module_refresh(self, force_refresh=False, notify=False): ''' Refresh the functions and returners. ''' log.debug('Refreshing modules. Notify=%s', notify) self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify) self.schedule.functions = self.functions self.schedule.returners = self.returners def beacons_refresh(self): ''' Refresh the functions and returners. 
''' log.debug('Refreshing beacons.') self.beacons = salt.beacons.Beacon(self.opts, self.functions) def matchers_refresh(self): ''' Refresh the matchers ''' log.debug('Refreshing matchers.') self.matchers = salt.loader.matchers(self.opts) # TODO: only allow one future in flight at a time? @tornado.gen.coroutine def pillar_refresh(self, force_refresh=False, notify=False): ''' Refresh the pillar ''' if self.connected: log.debug('Refreshing pillar. Notify: %s', notify) async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ) try: self.opts['pillar'] = yield async_pillar.compile_pillar() if notify: evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE) except SaltClientError: # Do not exit if a pillar refresh fails. log.error('Pillar data could not be refreshed. ' 'One or more masters may be down!') finally: async_pillar.destroy() self.module_refresh(force_refresh, notify) self.matchers_refresh() self.beacons_refresh() def manage_beacons(self, tag, data): ''' Manage Beacons ''' func = data.get('func', None) name = data.get('name', None) beacon_data = data.get('beacon_data', None) include_pillar = data.get('include_pillar', None) include_opts = data.get('include_opts', None) funcs = {'add': ('add_beacon', (name, beacon_data)), 'modify': ('modify_beacon', (name, beacon_data)), 'delete': ('delete_beacon', (name,)), 'enable': ('enable_beacons', ()), 'disable': ('disable_beacons', ()), 'enable_beacon': ('enable_beacon', (name,)), 'disable_beacon': ('disable_beacon', (name,)), 'list': ('list_beacons', (include_opts, include_pillar)), 'list_available': ('list_available_beacons', ()), 'validate_beacon': ('validate_beacon', (name, beacon_data)), 'reset': ('reset', ())} # Call the appropriate beacon function try: alias, params = funcs.get(func) getattr(self.beacons, alias)(*params) 
except AttributeError: log.error('Function "%s" is unavailable in salt.beacons', func) except TypeError as exc: log.info( 'Failed to handle %s with data(%s). Error: %s', tag, data, exc, exc_info_on_loglevel=logging.DEBUG ) def environ_setenv(self, tag, data): ''' Set the salt-minion main process environment according to the data contained in the minion event data ''' environ = data.get('environ', None) if environ is None: return False false_unsets = data.get('false_unsets', False) clear_all = data.get('clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all) def _pre_tune(self): ''' Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running ''' if self._running is None: self._running = True elif self._running is False: log.error( 'This %s was scheduled to stop. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return elif self._running is True: log.error( 'This %s is already running. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return try: log.info( '%s is starting as user \'%s\'', self.__class__.__name__, salt.utils.user.get_user() ) except Exception as err: # Only windows is allowed to fail here. See #3189. Log as debug in # that case. Else, error. 
log.log( salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR, 'Failed to get the user who is starting %s', self.__class__.__name__, exc_info=err ) def _mine_send(self, tag, data): ''' Send mine data to the master ''' channel = salt.transport.client.ReqChannel.factory(self.opts) data['tok'] = self.tok try: ret = channel.send(data) return ret except SaltReqTimeoutError: log.warning('Unable to send mine data to master.') return None finally: channel.close() def _handle_tag_module_refresh(self, tag, data): ''' Handle a module_refresh event ''' self.module_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) @tornado.gen.coroutine def _handle_tag_pillar_refresh(self, tag, data): ''' Handle a pillar_refresh event ''' yield self.pillar_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) def _handle_tag_beacons_refresh(self, tag, data): ''' Handle a beacon_refresh event ''' self.beacons_refresh() def _handle_tag_matchers_refresh(self, tag, data): ''' Handle a matchers_refresh event ''' self.matchers_refresh() def _handle_tag_manage_schedule(self, tag, data): ''' Handle a manage_schedule event ''' self.manage_schedule(tag, data) def _handle_tag_manage_beacons(self, tag, data): ''' Handle a manage_beacons event ''' self.manage_beacons(tag, data) def _handle_tag_grains_refresh(self, tag, data): ''' Handle a grains_refresh event ''' if (data.get('force_refresh', False) or self.grains_cache != self.opts['grains']): self.pillar_refresh(force_refresh=True) self.grains_cache = self.opts['grains'] def _handle_tag_environ_setenv(self, tag, data): ''' Handle a environ_setenv event ''' self.environ_setenv(tag, data) def _handle_tag_minion_mine(self, tag, data): ''' Handle a _minion_mine event ''' self._mine_send(tag, data) def _handle_tag_fire_master(self, tag, data): ''' Handle a fire_master event ''' if self.connected: log.debug('Forwarding master event tag=%s', data['tag']) 
self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) def _handle_tag_master_disconnected_failback(self, tag, data): ''' Handle a master_disconnected_failback event ''' # if the master disconnect event is for a different master, raise an exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 
'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = 
True self.io_loop.stop() def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) def _handle_tag_salt_auth_creds(self, tag, data): ''' Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def handle_event(self, package): 
''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. ''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion.manage_beacons
python
def manage_beacons(self, tag, data): ''' Manage Beacons ''' func = data.get('func', None) name = data.get('name', None) beacon_data = data.get('beacon_data', None) include_pillar = data.get('include_pillar', None) include_opts = data.get('include_opts', None) funcs = {'add': ('add_beacon', (name, beacon_data)), 'modify': ('modify_beacon', (name, beacon_data)), 'delete': ('delete_beacon', (name,)), 'enable': ('enable_beacons', ()), 'disable': ('disable_beacons', ()), 'enable_beacon': ('enable_beacon', (name,)), 'disable_beacon': ('disable_beacon', (name,)), 'list': ('list_beacons', (include_opts, include_pillar)), 'list_available': ('list_available_beacons', ()), 'validate_beacon': ('validate_beacon', (name, beacon_data)), 'reset': ('reset', ())} # Call the appropriate beacon function try: alias, params = funcs.get(func) getattr(self.beacons, alias)(*params) except AttributeError: log.error('Function "%s" is unavailable in salt.beacons', func) except TypeError as exc: log.info( 'Failed to handle %s with data(%s). Error: %s', tag, data, exc, exc_info_on_loglevel=logging.DEBUG )
Manage Beacons
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2295-L2330
null
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, 
self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' 
mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! 
if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) executors = salt.loader.executors(opts, functions, proxy=proxy) if opt_in: self.opts = opts return functions, returners, errors, executors def _send_req_sync(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.ReqChannel.factory(self.opts) try: return channel.send(load, timeout=timeout) finally: channel.close() @tornado.gen.coroutine def _send_req_async(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.AsyncReqChannel.factory(self.opts) try: ret = yield channel.send(load, timeout=timeout) raise tornado.gen.Return(ret) finally: channel.close() def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. ''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return if sync: try: self._send_req_sync(load, timeout) except salt.exceptions.SaltReqTimeoutError: log.info('fire_master failed: master could not be contacted. 
Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return False except Exception: log.info('fire_master failed: %s', traceback.format_exc()) return False else: if timeout_handler is None: def handle_timeout(*_): log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return True timeout_handler = handle_timeout with tornado.stack_context.ExceptionStackContext(timeout_handler): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # Ensure payload is unicode. Disregard failure to decode binary blobs. if six.PY2: data = salt.utils.data.decode(data, keep=True) if 'user' in data: log.info( 'User %s Executing command %s with jid %s', data['user'], data['fun'], data['jid'] ) else: log.info( 'Executing command %s with jid %s', data['fun'], data['jid'] ) log.debug('Command details %s', data) # Don't duplicate jobs log.trace('Started JIDs: %s', self.jid_queue) if self.jid_queue is not None: if data['jid'] in self.jid_queue: return else: self.jid_queue.append(data['jid']) if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']: self.jid_queue.pop(0) if isinstance(data['fun'], six.string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners process_count_max = self.opts.get('process_count_max') process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs') if process_count_max > 0: process_count = len(salt.utils.minion.running(self.opts)) while 
process_count >= process_count_max: log.warning('Maximum number of processes (%s) reached while ' 'executing jid %s, waiting %s seconds...', process_count_max, data['jid'], process_count_max_sleep_secs) yield tornado.gen.sleep(process_count_max_sleep_secs) process_count = len(salt.utils.minion.running(self.opts)) # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. instance = self multiprocessing_enabled = self.opts.get('multiprocessing', True) if multiprocessing_enabled: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None with default_signals(signal.SIGINT, signal.SIGTERM): process = SignalHandlingMultiprocessingProcess( target=self._target, args=(instance, self.opts, data, self.connected) ) else: process = threading.Thread( target=self._target, args=(instance, self.opts, data, self.connected), name=data['jid'] ) if multiprocessing_enabled: with default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers process.start() else: process.start() # TODO: remove the windows specific check? 
if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading 
target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. 
Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
try: func_result = all(return_data.get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret['retcode'] = retcode ret['success'] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = 'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( 'A command in \'%s\' had a problem: %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( 'Problem executing \'%s\': %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing \'{0}\': {1}'.format( function_name, exc ) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format( function_name, exc, func.__doc__ or '' ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string(function_name) ret['return'] = docs else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = 
function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: \'{0}\''.format( minion_instance.function_errors[mod_name] ) ret['success'] = False ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. Ignoring.') if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) # Add default returners from minion config # Should have been coverted to comma-delimited string already if isinstance(opts.get('return'), six.string_types): if data['ret']: data['ret'] = ','.join((data['ret'], opts['return'])) else: data['ret'] = opts['return'] log.debug('minion return: %s', ret) # TODO: make a list? Seems odd to split it this late :/ if data['ret'] and isinstance(data['ret'], six.string_types): if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: returner_str = '{0}.returner'.format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string(returner_str) log.error( 'Returner %s could not be loaded: %s', returner_str, returner_err ) except Exception as exc: log.exception( 'The return failed for job %s: %s', data['jid'], exc ) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. 
''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID %s', sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) multifunc_ordered = opts.get('multifunc_ordered', False) num_funcs = len(data['fun']) if multifunc_ordered: ret = { 'return': [None] * num_funcs, 'retcode': [None] * num_funcs, 'success': [False] * num_funcs } else: ret = { 'return': {}, 'retcode': {}, 'success': {} } for ind in range(0, num_funcs): if not multifunc_ordered: ret['success'][data['fun'][ind]] = False try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True elif minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. 
Only ' 'saltutil.refresh_pillar allowed in blackout mode.') func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) minion_instance.functions.pack['__context__']['retcode'] = 0 key = ind if multifunc_ordered else data['fun'][ind] ret['return'][key] = func(*args, **kwargs) retcode = minion_instance.functions.pack['__context__'].get( 'retcode', 0 ) if retcode == 0: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. try: func_result = all(ret['return'][key].get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = 1 ret['retcode'][key] = retcode ret['success'][key] = retcode == 0 except Exception as exc: trb = traceback.format_exc() log.warning('The minion function caused an exception: %s', exc) if multifunc_ordered: ret['return'][ind] = trb else: ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job %s: %s', data['jid'], exc ) def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file 
is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled if ret['jid'] == 'req': ret['jid'] = salt.utils.jid.gen_jid(self.opts) salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) if not self.opts['pub_ret']: return '' def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. 
If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' if not isinstance(rets, list): rets = [rets] jids = {} for ret in rets: jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) load = jids.setdefault(jid, {}) if ret_cmd == '_syndic_return': if not load: load.update({'id': self.opts['id'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__'), 'return': {}}) if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load.update({'id': self.opts['id']}) for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. 
This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled salt.utils.minion.cache_jobs(self.opts, load['jid'], ret) load = {'cmd': ret_cmd, 'load': list(six.itervalues(jids))} def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' 
) else: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data) def _refresh_grains_watcher(self, refresh_interval_in_minutes): ''' Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: :return: None ''' if '__update_grains' not in self.opts.get('schedule', {}): if 'schedule' not in self.opts: self.opts['schedule'] = {} self.opts['schedule'].update({ '__update_grains': { 'function': 'event.fire', 'args': [{}, 'grains_refresh'], 'minutes': refresh_interval_in_minutes } }) def _fire_master_minion_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to False in Sodium release. self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'minion_start' ) # send name spaced event self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'minion'), ) def module_refresh(self, force_refresh=False, notify=False): ''' Refresh the functions and returners. ''' log.debug('Refreshing modules. Notify=%s', notify) self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify) self.schedule.functions = self.functions self.schedule.returners = self.returners def beacons_refresh(self): ''' Refresh the functions and returners. 
''' log.debug('Refreshing beacons.') self.beacons = salt.beacons.Beacon(self.opts, self.functions) def matchers_refresh(self): ''' Refresh the matchers ''' log.debug('Refreshing matchers.') self.matchers = salt.loader.matchers(self.opts) # TODO: only allow one future in flight at a time? @tornado.gen.coroutine def pillar_refresh(self, force_refresh=False, notify=False): ''' Refresh the pillar ''' if self.connected: log.debug('Refreshing pillar. Notify: %s', notify) async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ) try: self.opts['pillar'] = yield async_pillar.compile_pillar() if notify: evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE) except SaltClientError: # Do not exit if a pillar refresh fails. log.error('Pillar data could not be refreshed. ' 'One or more masters may be down!') finally: async_pillar.destroy() self.module_refresh(force_refresh, notify) self.matchers_refresh() self.beacons_refresh() def manage_schedule(self, tag, data): ''' Refresh the functions and returners. 
''' func = data.get('func', None) name = data.get('name', None) schedule = data.get('schedule', None) where = data.get('where', None) persist = data.get('persist', None) funcs = {'delete': ('delete_job', (name, persist)), 'add': ('add_job', (schedule, persist)), 'modify': ('modify_job', (name, schedule, persist)), 'enable': ('enable_schedule', ()), 'disable': ('disable_schedule', ()), 'enable_job': ('enable_job', (name, persist)), 'disable_job': ('disable_job', (name, persist)), 'postpone_job': ('postpone_job', (name, data)), 'skip_job': ('skip_job', (name, data)), 'reload': ('reload', (schedule,)), 'list': ('list', (where,)), 'save_schedule': ('save_schedule', ()), 'get_next_fire_time': ('get_next_fire_time', (name,))} # Call the appropriate schedule function try: alias, params = funcs.get(func) getattr(self.schedule, alias)(*params) except TypeError: log.error('Function "%s" is unavailable in salt.utils.scheduler', func) def environ_setenv(self, tag, data): ''' Set the salt-minion main process environment according to the data contained in the minion event data ''' environ = data.get('environ', None) if environ is None: return False false_unsets = data.get('false_unsets', False) clear_all = data.get('clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all) def _pre_tune(self): ''' Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running ''' if self._running is None: self._running = True elif self._running is False: log.error( 'This %s was scheduled to stop. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return elif self._running is True: log.error( 'This %s is already running. 
Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return try: log.info( '%s is starting as user \'%s\'', self.__class__.__name__, salt.utils.user.get_user() ) except Exception as err: # Only windows is allowed to fail here. See #3189. Log as debug in # that case. Else, error. log.log( salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR, 'Failed to get the user who is starting %s', self.__class__.__name__, exc_info=err ) def _mine_send(self, tag, data): ''' Send mine data to the master ''' channel = salt.transport.client.ReqChannel.factory(self.opts) data['tok'] = self.tok try: ret = channel.send(data) return ret except SaltReqTimeoutError: log.warning('Unable to send mine data to master.') return None finally: channel.close() def _handle_tag_module_refresh(self, tag, data): ''' Handle a module_refresh event ''' self.module_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) @tornado.gen.coroutine def _handle_tag_pillar_refresh(self, tag, data): ''' Handle a pillar_refresh event ''' yield self.pillar_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) def _handle_tag_beacons_refresh(self, tag, data): ''' Handle a beacon_refresh event ''' self.beacons_refresh() def _handle_tag_matchers_refresh(self, tag, data): ''' Handle a matchers_refresh event ''' self.matchers_refresh() def _handle_tag_manage_schedule(self, tag, data): ''' Handle a manage_schedule event ''' self.manage_schedule(tag, data) def _handle_tag_manage_beacons(self, tag, data): ''' Handle a manage_beacons event ''' self.manage_beacons(tag, data) def _handle_tag_grains_refresh(self, tag, data): ''' Handle a grains_refresh event ''' if (data.get('force_refresh', False) or self.grains_cache != self.opts['grains']): self.pillar_refresh(force_refresh=True) self.grains_cache = self.opts['grains'] def _handle_tag_environ_setenv(self, tag, data): ''' Handle a environ_setenv event ''' 
self.environ_setenv(tag, data) def _handle_tag_minion_mine(self, tag, data): ''' Handle a _minion_mine event ''' self._mine_send(tag, data) def _handle_tag_fire_master(self, tag, data): ''' Handle a fire_master event ''' if self.connected: log.debug('Forwarding master event tag=%s', data['tag']) self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) def _handle_tag_master_disconnected_failback(self, tag, data): ''' Handle a master_disconnected_failback event ''' # if the master disconnect event is for a different master, raise an exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) 
else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 
'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = True self.io_loop.stop() def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) def _handle_tag_salt_auth_creds(self, tag, data): ''' 
Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def handle_event(self, package): ''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. 
''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. 
Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion.environ_setenv
python
def environ_setenv(self, tag, data): ''' Set the salt-minion main process environment according to the data contained in the minion event data ''' environ = data.get('environ', None) if environ is None: return False false_unsets = data.get('false_unsets', False) clear_all = data.get('clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all)
Set the salt-minion main process environment according to the data contained in the minion event data
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2332-L2343
[ "def setenv(environ, false_unsets=False, clear_all=False, update_minion=False, permanent=False):\n '''\n Set multiple salt process environment variables from a dict.\n Returns a dict.\n\n environ\n Must be a dict. The top-level keys of the dict are the names\n of the environment variables to set. Each key's value must be\n a string or False. Refer to the 'false_unsets' parameter for\n behavior when a value set to False.\n\n false_unsets\n If a key's value is False and false_unsets is True, then the\n key will be removed from the salt processes environment dict\n entirely. If a key's value is False and false_unsets is not\n True, then the key's value will be set to an empty string.\n Default: False\n\n clear_all\n USE WITH CAUTION! This option can unset environment variables\n needed for salt to function properly.\n If clear_all is True, then any environment variables not\n defined in the environ dict will be deleted.\n Default: False\n\n update_minion\n If True, apply these environ changes to the main salt-minion\n process. If False, the environ changes will only affect the\n current salt subprocess.\n Default: False\n\n permanent\n On Windows minions this will set the environment variable in the\n registry so that it is always added as an environment variable when\n applications open. If you want to set the variable to HKLM instead of\n HKCU just pass in \"HKLM\" for this parameter. On all other minion types\n this will be ignored. Note: This will only take affect on applications\n opened after this has been set.\n\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' environ.setenv '{\"foo\": \"bar\", \"baz\": \"quux\"}'\n salt '*' environ.setenv '{\"a\": \"b\", \"c\": False}' false_unsets=True\n '''\n ret = {}\n if not isinstance(environ, dict):\n log.debug(\n '%s: \\'environ\\' argument is not a dict: \\'%s\\'',\n __name__, environ\n )\n return False\n if clear_all is True:\n # Unset any keys not defined in 'environ' dict supplied by user\n to_unset = [key for key in os.environ if key not in environ]\n for key in to_unset:\n ret[key] = setval(key, False, false_unsets, permanent=permanent)\n for key, val in six.iteritems(environ):\n if isinstance(val, six.string_types):\n ret[key] = setval(key, val, permanent=permanent)\n elif val is False:\n ret[key] = setval(key, val, false_unsets, permanent=permanent)\n else:\n log.debug(\n '%s: \\'val\\' argument for key \\'%s\\' is not a string '\n 'or False: \\'%s\\'', __name__, key, val\n )\n return False\n\n if update_minion is True:\n __salt__['event.fire']({'environ': environ,\n 'false_unsets': false_unsets,\n 'clear_all': clear_all,\n 'permanent': permanent\n },\n 'environ_setenv')\n return ret\n" ]
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, 
self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' 
mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! 
if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) executors = salt.loader.executors(opts, functions, proxy=proxy) if opt_in: self.opts = opts return functions, returners, errors, executors def _send_req_sync(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.ReqChannel.factory(self.opts) try: return channel.send(load, timeout=timeout) finally: channel.close() @tornado.gen.coroutine def _send_req_async(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.AsyncReqChannel.factory(self.opts) try: ret = yield channel.send(load, timeout=timeout) raise tornado.gen.Return(ret) finally: channel.close() def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. ''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return if sync: try: self._send_req_sync(load, timeout) except salt.exceptions.SaltReqTimeoutError: log.info('fire_master failed: master could not be contacted. 
Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return False except Exception: log.info('fire_master failed: %s', traceback.format_exc()) return False else: if timeout_handler is None: def handle_timeout(*_): log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return True timeout_handler = handle_timeout with tornado.stack_context.ExceptionStackContext(timeout_handler): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # Ensure payload is unicode. Disregard failure to decode binary blobs. if six.PY2: data = salt.utils.data.decode(data, keep=True) if 'user' in data: log.info( 'User %s Executing command %s with jid %s', data['user'], data['fun'], data['jid'] ) else: log.info( 'Executing command %s with jid %s', data['fun'], data['jid'] ) log.debug('Command details %s', data) # Don't duplicate jobs log.trace('Started JIDs: %s', self.jid_queue) if self.jid_queue is not None: if data['jid'] in self.jid_queue: return else: self.jid_queue.append(data['jid']) if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']: self.jid_queue.pop(0) if isinstance(data['fun'], six.string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners process_count_max = self.opts.get('process_count_max') process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs') if process_count_max > 0: process_count = len(salt.utils.minion.running(self.opts)) while 
process_count >= process_count_max: log.warning('Maximum number of processes (%s) reached while ' 'executing jid %s, waiting %s seconds...', process_count_max, data['jid'], process_count_max_sleep_secs) yield tornado.gen.sleep(process_count_max_sleep_secs) process_count = len(salt.utils.minion.running(self.opts)) # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. instance = self multiprocessing_enabled = self.opts.get('multiprocessing', True) if multiprocessing_enabled: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None with default_signals(signal.SIGINT, signal.SIGTERM): process = SignalHandlingMultiprocessingProcess( target=self._target, args=(instance, self.opts, data, self.connected) ) else: process = threading.Thread( target=self._target, args=(instance, self.opts, data, self.connected), name=data['jid'] ) if multiprocessing_enabled: with default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers process.start() else: process.start() # TODO: remove the windows specific check? 
if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading 
target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. 
Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
try: func_result = all(return_data.get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret['retcode'] = retcode ret['success'] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = 'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( 'A command in \'%s\' had a problem: %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( 'Problem executing \'%s\': %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing \'{0}\': {1}'.format( function_name, exc ) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format( function_name, exc, func.__doc__ or '' ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string(function_name) ret['return'] = docs else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = 
function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: \'{0}\''.format( minion_instance.function_errors[mod_name] ) ret['success'] = False ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. Ignoring.') if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) # Add default returners from minion config # Should have been coverted to comma-delimited string already if isinstance(opts.get('return'), six.string_types): if data['ret']: data['ret'] = ','.join((data['ret'], opts['return'])) else: data['ret'] = opts['return'] log.debug('minion return: %s', ret) # TODO: make a list? Seems odd to split it this late :/ if data['ret'] and isinstance(data['ret'], six.string_types): if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: returner_str = '{0}.returner'.format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string(returner_str) log.error( 'Returner %s could not be loaded: %s', returner_str, returner_err ) except Exception as exc: log.exception( 'The return failed for job %s: %s', data['jid'], exc ) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. 
''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID %s', sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) multifunc_ordered = opts.get('multifunc_ordered', False) num_funcs = len(data['fun']) if multifunc_ordered: ret = { 'return': [None] * num_funcs, 'retcode': [None] * num_funcs, 'success': [False] * num_funcs } else: ret = { 'return': {}, 'retcode': {}, 'success': {} } for ind in range(0, num_funcs): if not multifunc_ordered: ret['success'][data['fun'][ind]] = False try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True elif minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. 
Only ' 'saltutil.refresh_pillar allowed in blackout mode.') func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) minion_instance.functions.pack['__context__']['retcode'] = 0 key = ind if multifunc_ordered else data['fun'][ind] ret['return'][key] = func(*args, **kwargs) retcode = minion_instance.functions.pack['__context__'].get( 'retcode', 0 ) if retcode == 0: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. try: func_result = all(ret['return'][key].get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = 1 ret['retcode'][key] = retcode ret['success'][key] = retcode == 0 except Exception as exc: trb = traceback.format_exc() log.warning('The minion function caused an exception: %s', exc) if multifunc_ordered: ret['return'][ind] = trb else: ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job %s: %s', data['jid'], exc ) def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file 
is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled if ret['jid'] == 'req': ret['jid'] = salt.utils.jid.gen_jid(self.opts) salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) if not self.opts['pub_ret']: return '' def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. 
If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' if not isinstance(rets, list): rets = [rets] jids = {} for ret in rets: jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) load = jids.setdefault(jid, {}) if ret_cmd == '_syndic_return': if not load: load.update({'id': self.opts['id'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__'), 'return': {}}) if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load.update({'id': self.opts['id']}) for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. 
This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled salt.utils.minion.cache_jobs(self.opts, load['jid'], ret) load = {'cmd': ret_cmd, 'load': list(six.itervalues(jids))} def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' 
) else: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data) def _refresh_grains_watcher(self, refresh_interval_in_minutes): ''' Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: :return: None ''' if '__update_grains' not in self.opts.get('schedule', {}): if 'schedule' not in self.opts: self.opts['schedule'] = {} self.opts['schedule'].update({ '__update_grains': { 'function': 'event.fire', 'args': [{}, 'grains_refresh'], 'minutes': refresh_interval_in_minutes } }) def _fire_master_minion_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to False in Sodium release. self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'minion_start' ) # send name spaced event self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'minion'), ) def module_refresh(self, force_refresh=False, notify=False): ''' Refresh the functions and returners. ''' log.debug('Refreshing modules. Notify=%s', notify) self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify) self.schedule.functions = self.functions self.schedule.returners = self.returners def beacons_refresh(self): ''' Refresh the functions and returners. 
''' log.debug('Refreshing beacons.') self.beacons = salt.beacons.Beacon(self.opts, self.functions) def matchers_refresh(self): ''' Refresh the matchers ''' log.debug('Refreshing matchers.') self.matchers = salt.loader.matchers(self.opts) # TODO: only allow one future in flight at a time? @tornado.gen.coroutine def pillar_refresh(self, force_refresh=False, notify=False): ''' Refresh the pillar ''' if self.connected: log.debug('Refreshing pillar. Notify: %s', notify) async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ) try: self.opts['pillar'] = yield async_pillar.compile_pillar() if notify: evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE) except SaltClientError: # Do not exit if a pillar refresh fails. log.error('Pillar data could not be refreshed. ' 'One or more masters may be down!') finally: async_pillar.destroy() self.module_refresh(force_refresh, notify) self.matchers_refresh() self.beacons_refresh() def manage_schedule(self, tag, data): ''' Refresh the functions and returners. 
''' func = data.get('func', None) name = data.get('name', None) schedule = data.get('schedule', None) where = data.get('where', None) persist = data.get('persist', None) funcs = {'delete': ('delete_job', (name, persist)), 'add': ('add_job', (schedule, persist)), 'modify': ('modify_job', (name, schedule, persist)), 'enable': ('enable_schedule', ()), 'disable': ('disable_schedule', ()), 'enable_job': ('enable_job', (name, persist)), 'disable_job': ('disable_job', (name, persist)), 'postpone_job': ('postpone_job', (name, data)), 'skip_job': ('skip_job', (name, data)), 'reload': ('reload', (schedule,)), 'list': ('list', (where,)), 'save_schedule': ('save_schedule', ()), 'get_next_fire_time': ('get_next_fire_time', (name,))} # Call the appropriate schedule function try: alias, params = funcs.get(func) getattr(self.schedule, alias)(*params) except TypeError: log.error('Function "%s" is unavailable in salt.utils.scheduler', func) def manage_beacons(self, tag, data): ''' Manage Beacons ''' func = data.get('func', None) name = data.get('name', None) beacon_data = data.get('beacon_data', None) include_pillar = data.get('include_pillar', None) include_opts = data.get('include_opts', None) funcs = {'add': ('add_beacon', (name, beacon_data)), 'modify': ('modify_beacon', (name, beacon_data)), 'delete': ('delete_beacon', (name,)), 'enable': ('enable_beacons', ()), 'disable': ('disable_beacons', ()), 'enable_beacon': ('enable_beacon', (name,)), 'disable_beacon': ('disable_beacon', (name,)), 'list': ('list_beacons', (include_opts, include_pillar)), 'list_available': ('list_available_beacons', ()), 'validate_beacon': ('validate_beacon', (name, beacon_data)), 'reset': ('reset', ())} # Call the appropriate beacon function try: alias, params = funcs.get(func) getattr(self.beacons, alias)(*params) except AttributeError: log.error('Function "%s" is unavailable in salt.beacons', func) except TypeError as exc: log.info( 'Failed to handle %s with data(%s). 
Error: %s', tag, data, exc, exc_info_on_loglevel=logging.DEBUG ) def _pre_tune(self): ''' Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running ''' if self._running is None: self._running = True elif self._running is False: log.error( 'This %s was scheduled to stop. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return elif self._running is True: log.error( 'This %s is already running. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return try: log.info( '%s is starting as user \'%s\'', self.__class__.__name__, salt.utils.user.get_user() ) except Exception as err: # Only windows is allowed to fail here. See #3189. Log as debug in # that case. Else, error. log.log( salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR, 'Failed to get the user who is starting %s', self.__class__.__name__, exc_info=err ) def _mine_send(self, tag, data): ''' Send mine data to the master ''' channel = salt.transport.client.ReqChannel.factory(self.opts) data['tok'] = self.tok try: ret = channel.send(data) return ret except SaltReqTimeoutError: log.warning('Unable to send mine data to master.') return None finally: channel.close() def _handle_tag_module_refresh(self, tag, data): ''' Handle a module_refresh event ''' self.module_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) @tornado.gen.coroutine def _handle_tag_pillar_refresh(self, tag, data): ''' Handle a pillar_refresh event ''' yield self.pillar_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) def _handle_tag_beacons_refresh(self, tag, data): ''' Handle a beacon_refresh event ''' self.beacons_refresh() def _handle_tag_matchers_refresh(self, tag, data): ''' Handle a matchers_refresh event ''' self.matchers_refresh() def _handle_tag_manage_schedule(self, tag, data): ''' Handle a manage_schedule event ''' 
self.manage_schedule(tag, data) def _handle_tag_manage_beacons(self, tag, data): ''' Handle a manage_beacons event ''' self.manage_beacons(tag, data) def _handle_tag_grains_refresh(self, tag, data): ''' Handle a grains_refresh event ''' if (data.get('force_refresh', False) or self.grains_cache != self.opts['grains']): self.pillar_refresh(force_refresh=True) self.grains_cache = self.opts['grains'] def _handle_tag_environ_setenv(self, tag, data): ''' Handle a environ_setenv event ''' self.environ_setenv(tag, data) def _handle_tag_minion_mine(self, tag, data): ''' Handle a _minion_mine event ''' self._mine_send(tag, data) def _handle_tag_fire_master(self, tag, data): ''' Handle a fire_master event ''' if self.connected: log.debug('Forwarding master event tag=%s', data['tag']) self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) def _handle_tag_master_disconnected_failback(self, tag, data): ''' Handle a master_disconnected_failback event ''' # if the master disconnect event is for a different master, raise an exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs 
master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 
'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = True self.io_loop.stop() def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with 
master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) def _handle_tag_salt_auth_creds(self, tag, data): ''' Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def handle_event(self, package): ''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in 
tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. ''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. 
Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion._pre_tune
python
def _pre_tune(self):
    '''
    Set the minion running flag and issue the appropriate warnings if
    the minion cannot be started or is already running
    '''
    if self._running is None:
        # First invocation: mark this minion as running.
        self._running = True
    elif self._running is False:
        # The minion was explicitly told to stop; refuse to tune in.
        log.error(
            'This %s was scheduled to stop. Not running %s.tune_in()',
            self.__class__.__name__, self.__class__.__name__
        )
        return
    elif self._running is True:
        # Guard against a double tune_in() on the same instance.
        log.error(
            'This %s is already running. Not running %s.tune_in()',
            self.__class__.__name__, self.__class__.__name__
        )
        return

    try:
        log.info(
            '%s is starting as user \'%s\'',
            self.__class__.__name__, salt.utils.user.get_user()
        )
    except Exception as err:
        # Only windows is allowed to fail here. See #3189. Log as debug in
        # that case. Else, error.
        # NOTE: replaced the fragile ``cond and a or b`` idiom with an
        # explicit conditional expression; the old form only worked
        # because logging.DEBUG (10) happens to be truthy.
        log.log(
            logging.DEBUG if salt.utils.platform.is_windows() else logging.ERROR,
            'Failed to get the user who is starting %s',
            self.__class__.__name__,
            exc_info=err
        )
Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2345-L2378
[ "def get_user():\n '''\n Get the current user\n '''\n if HAS_PWD:\n ret = pwd.getpwuid(os.geteuid()).pw_name\n elif HAS_WIN_FUNCTIONS and salt.utils.win_functions.HAS_WIN32:\n ret = salt.utils.win_functions.get_current_user()\n else:\n raise CommandExecutionError(\n 'Required external library (pwd or win32api) not installed')\n return salt.utils.stringutils.to_unicode(ret)\n" ]
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, 
self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' 
mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! 
if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) executors = salt.loader.executors(opts, functions, proxy=proxy) if opt_in: self.opts = opts return functions, returners, errors, executors def _send_req_sync(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.ReqChannel.factory(self.opts) try: return channel.send(load, timeout=timeout) finally: channel.close() @tornado.gen.coroutine def _send_req_async(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.AsyncReqChannel.factory(self.opts) try: ret = yield channel.send(load, timeout=timeout) raise tornado.gen.Return(ret) finally: channel.close() def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. ''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return if sync: try: self._send_req_sync(load, timeout) except salt.exceptions.SaltReqTimeoutError: log.info('fire_master failed: master could not be contacted. 
Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return False except Exception: log.info('fire_master failed: %s', traceback.format_exc()) return False else: if timeout_handler is None: def handle_timeout(*_): log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return True timeout_handler = handle_timeout with tornado.stack_context.ExceptionStackContext(timeout_handler): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # Ensure payload is unicode. Disregard failure to decode binary blobs. if six.PY2: data = salt.utils.data.decode(data, keep=True) if 'user' in data: log.info( 'User %s Executing command %s with jid %s', data['user'], data['fun'], data['jid'] ) else: log.info( 'Executing command %s with jid %s', data['fun'], data['jid'] ) log.debug('Command details %s', data) # Don't duplicate jobs log.trace('Started JIDs: %s', self.jid_queue) if self.jid_queue is not None: if data['jid'] in self.jid_queue: return else: self.jid_queue.append(data['jid']) if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']: self.jid_queue.pop(0) if isinstance(data['fun'], six.string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners process_count_max = self.opts.get('process_count_max') process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs') if process_count_max > 0: process_count = len(salt.utils.minion.running(self.opts)) while 
process_count >= process_count_max: log.warning('Maximum number of processes (%s) reached while ' 'executing jid %s, waiting %s seconds...', process_count_max, data['jid'], process_count_max_sleep_secs) yield tornado.gen.sleep(process_count_max_sleep_secs) process_count = len(salt.utils.minion.running(self.opts)) # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. instance = self multiprocessing_enabled = self.opts.get('multiprocessing', True) if multiprocessing_enabled: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None with default_signals(signal.SIGINT, signal.SIGTERM): process = SignalHandlingMultiprocessingProcess( target=self._target, args=(instance, self.opts, data, self.connected) ) else: process = threading.Thread( target=self._target, args=(instance, self.opts, data, self.connected), name=data['jid'] ) if multiprocessing_enabled: with default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers process.start() else: process.start() # TODO: remove the windows specific check? 
if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading 
target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. 
Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
try: func_result = all(return_data.get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret['retcode'] = retcode ret['success'] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = 'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( 'A command in \'%s\' had a problem: %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( 'Problem executing \'%s\': %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing \'{0}\': {1}'.format( function_name, exc ) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format( function_name, exc, func.__doc__ or '' ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string(function_name) ret['return'] = docs else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = 
function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: \'{0}\''.format( minion_instance.function_errors[mod_name] ) ret['success'] = False ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. Ignoring.') if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) # Add default returners from minion config # Should have been coverted to comma-delimited string already if isinstance(opts.get('return'), six.string_types): if data['ret']: data['ret'] = ','.join((data['ret'], opts['return'])) else: data['ret'] = opts['return'] log.debug('minion return: %s', ret) # TODO: make a list? Seems odd to split it this late :/ if data['ret'] and isinstance(data['ret'], six.string_types): if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: returner_str = '{0}.returner'.format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string(returner_str) log.error( 'Returner %s could not be loaded: %s', returner_str, returner_err ) except Exception as exc: log.exception( 'The return failed for job %s: %s', data['jid'], exc ) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. 
''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID %s', sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) multifunc_ordered = opts.get('multifunc_ordered', False) num_funcs = len(data['fun']) if multifunc_ordered: ret = { 'return': [None] * num_funcs, 'retcode': [None] * num_funcs, 'success': [False] * num_funcs } else: ret = { 'return': {}, 'retcode': {}, 'success': {} } for ind in range(0, num_funcs): if not multifunc_ordered: ret['success'][data['fun'][ind]] = False try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True elif minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. 
Only ' 'saltutil.refresh_pillar allowed in blackout mode.') func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) minion_instance.functions.pack['__context__']['retcode'] = 0 key = ind if multifunc_ordered else data['fun'][ind] ret['return'][key] = func(*args, **kwargs) retcode = minion_instance.functions.pack['__context__'].get( 'retcode', 0 ) if retcode == 0: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. try: func_result = all(ret['return'][key].get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = 1 ret['retcode'][key] = retcode ret['success'][key] = retcode == 0 except Exception as exc: trb = traceback.format_exc() log.warning('The minion function caused an exception: %s', exc) if multifunc_ordered: ret['return'][ind] = trb else: ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job %s: %s', data['jid'], exc ) def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file 
is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled if ret['jid'] == 'req': ret['jid'] = salt.utils.jid.gen_jid(self.opts) salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) if not self.opts['pub_ret']: return '' def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. 
If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' if not isinstance(rets, list): rets = [rets] jids = {} for ret in rets: jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) load = jids.setdefault(jid, {}) if ret_cmd == '_syndic_return': if not load: load.update({'id': self.opts['id'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__'), 'return': {}}) if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load.update({'id': self.opts['id']}) for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. 
This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled salt.utils.minion.cache_jobs(self.opts, load['jid'], ret) load = {'cmd': ret_cmd, 'load': list(six.itervalues(jids))} def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' 
) else: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data) def _refresh_grains_watcher(self, refresh_interval_in_minutes): ''' Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: :return: None ''' if '__update_grains' not in self.opts.get('schedule', {}): if 'schedule' not in self.opts: self.opts['schedule'] = {} self.opts['schedule'].update({ '__update_grains': { 'function': 'event.fire', 'args': [{}, 'grains_refresh'], 'minutes': refresh_interval_in_minutes } }) def _fire_master_minion_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to False in Sodium release. self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'minion_start' ) # send name spaced event self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'minion'), ) def module_refresh(self, force_refresh=False, notify=False): ''' Refresh the functions and returners. ''' log.debug('Refreshing modules. Notify=%s', notify) self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify) self.schedule.functions = self.functions self.schedule.returners = self.returners def beacons_refresh(self): ''' Refresh the functions and returners. 
''' log.debug('Refreshing beacons.') self.beacons = salt.beacons.Beacon(self.opts, self.functions) def matchers_refresh(self): ''' Refresh the matchers ''' log.debug('Refreshing matchers.') self.matchers = salt.loader.matchers(self.opts) # TODO: only allow one future in flight at a time? @tornado.gen.coroutine def pillar_refresh(self, force_refresh=False, notify=False): ''' Refresh the pillar ''' if self.connected: log.debug('Refreshing pillar. Notify: %s', notify) async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ) try: self.opts['pillar'] = yield async_pillar.compile_pillar() if notify: evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE) except SaltClientError: # Do not exit if a pillar refresh fails. log.error('Pillar data could not be refreshed. ' 'One or more masters may be down!') finally: async_pillar.destroy() self.module_refresh(force_refresh, notify) self.matchers_refresh() self.beacons_refresh() def manage_schedule(self, tag, data): ''' Refresh the functions and returners. 
''' func = data.get('func', None) name = data.get('name', None) schedule = data.get('schedule', None) where = data.get('where', None) persist = data.get('persist', None) funcs = {'delete': ('delete_job', (name, persist)), 'add': ('add_job', (schedule, persist)), 'modify': ('modify_job', (name, schedule, persist)), 'enable': ('enable_schedule', ()), 'disable': ('disable_schedule', ()), 'enable_job': ('enable_job', (name, persist)), 'disable_job': ('disable_job', (name, persist)), 'postpone_job': ('postpone_job', (name, data)), 'skip_job': ('skip_job', (name, data)), 'reload': ('reload', (schedule,)), 'list': ('list', (where,)), 'save_schedule': ('save_schedule', ()), 'get_next_fire_time': ('get_next_fire_time', (name,))} # Call the appropriate schedule function try: alias, params = funcs.get(func) getattr(self.schedule, alias)(*params) except TypeError: log.error('Function "%s" is unavailable in salt.utils.scheduler', func) def manage_beacons(self, tag, data): ''' Manage Beacons ''' func = data.get('func', None) name = data.get('name', None) beacon_data = data.get('beacon_data', None) include_pillar = data.get('include_pillar', None) include_opts = data.get('include_opts', None) funcs = {'add': ('add_beacon', (name, beacon_data)), 'modify': ('modify_beacon', (name, beacon_data)), 'delete': ('delete_beacon', (name,)), 'enable': ('enable_beacons', ()), 'disable': ('disable_beacons', ()), 'enable_beacon': ('enable_beacon', (name,)), 'disable_beacon': ('disable_beacon', (name,)), 'list': ('list_beacons', (include_opts, include_pillar)), 'list_available': ('list_available_beacons', ()), 'validate_beacon': ('validate_beacon', (name, beacon_data)), 'reset': ('reset', ())} # Call the appropriate beacon function try: alias, params = funcs.get(func) getattr(self.beacons, alias)(*params) except AttributeError: log.error('Function "%s" is unavailable in salt.beacons', func) except TypeError as exc: log.info( 'Failed to handle %s with data(%s). 
Error: %s', tag, data, exc, exc_info_on_loglevel=logging.DEBUG ) def environ_setenv(self, tag, data): ''' Set the salt-minion main process environment according to the data contained in the minion event data ''' environ = data.get('environ', None) if environ is None: return False false_unsets = data.get('false_unsets', False) clear_all = data.get('clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all) def _mine_send(self, tag, data): ''' Send mine data to the master ''' channel = salt.transport.client.ReqChannel.factory(self.opts) data['tok'] = self.tok try: ret = channel.send(data) return ret except SaltReqTimeoutError: log.warning('Unable to send mine data to master.') return None finally: channel.close() def _handle_tag_module_refresh(self, tag, data): ''' Handle a module_refresh event ''' self.module_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) @tornado.gen.coroutine def _handle_tag_pillar_refresh(self, tag, data): ''' Handle a pillar_refresh event ''' yield self.pillar_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) def _handle_tag_beacons_refresh(self, tag, data): ''' Handle a beacon_refresh event ''' self.beacons_refresh() def _handle_tag_matchers_refresh(self, tag, data): ''' Handle a matchers_refresh event ''' self.matchers_refresh() def _handle_tag_manage_schedule(self, tag, data): ''' Handle a manage_schedule event ''' self.manage_schedule(tag, data) def _handle_tag_manage_beacons(self, tag, data): ''' Handle a manage_beacons event ''' self.manage_beacons(tag, data) def _handle_tag_grains_refresh(self, tag, data): ''' Handle a grains_refresh event ''' if (data.get('force_refresh', False) or self.grains_cache != self.opts['grains']): self.pillar_refresh(force_refresh=True) self.grains_cache = self.opts['grains'] def _handle_tag_environ_setenv(self, tag, data): ''' Handle a environ_setenv event ''' 
self.environ_setenv(tag, data) def _handle_tag_minion_mine(self, tag, data): ''' Handle a _minion_mine event ''' self._mine_send(tag, data) def _handle_tag_fire_master(self, tag, data): ''' Handle a fire_master event ''' if self.connected: log.debug('Forwarding master event tag=%s', data['tag']) self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) def _handle_tag_master_disconnected_failback(self, tag, data): ''' Handle a master_disconnected_failback event ''' # if the master disconnect event is for a different master, raise an exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) 
else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 
'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = True self.io_loop.stop() def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) def _handle_tag_salt_auth_creds(self, tag, data): ''' 
Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def handle_event(self, package): ''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. 
''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. 
Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion._mine_send
python
def _mine_send(self, tag, data): ''' Send mine data to the master ''' channel = salt.transport.client.ReqChannel.factory(self.opts) data['tok'] = self.tok try: ret = channel.send(data) return ret except SaltReqTimeoutError: log.warning('Unable to send mine data to master.') return None finally: channel.close()
Send mine data to the master
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2380-L2393
null
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, 
self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' 
mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! 
if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) executors = salt.loader.executors(opts, functions, proxy=proxy) if opt_in: self.opts = opts return functions, returners, errors, executors def _send_req_sync(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.ReqChannel.factory(self.opts) try: return channel.send(load, timeout=timeout) finally: channel.close() @tornado.gen.coroutine def _send_req_async(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.AsyncReqChannel.factory(self.opts) try: ret = yield channel.send(load, timeout=timeout) raise tornado.gen.Return(ret) finally: channel.close() def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. ''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return if sync: try: self._send_req_sync(load, timeout) except salt.exceptions.SaltReqTimeoutError: log.info('fire_master failed: master could not be contacted. 
Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return False except Exception: log.info('fire_master failed: %s', traceback.format_exc()) return False else: if timeout_handler is None: def handle_timeout(*_): log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return True timeout_handler = handle_timeout with tornado.stack_context.ExceptionStackContext(timeout_handler): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # Ensure payload is unicode. Disregard failure to decode binary blobs. if six.PY2: data = salt.utils.data.decode(data, keep=True) if 'user' in data: log.info( 'User %s Executing command %s with jid %s', data['user'], data['fun'], data['jid'] ) else: log.info( 'Executing command %s with jid %s', data['fun'], data['jid'] ) log.debug('Command details %s', data) # Don't duplicate jobs log.trace('Started JIDs: %s', self.jid_queue) if self.jid_queue is not None: if data['jid'] in self.jid_queue: return else: self.jid_queue.append(data['jid']) if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']: self.jid_queue.pop(0) if isinstance(data['fun'], six.string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners process_count_max = self.opts.get('process_count_max') process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs') if process_count_max > 0: process_count = len(salt.utils.minion.running(self.opts)) while 
process_count >= process_count_max: log.warning('Maximum number of processes (%s) reached while ' 'executing jid %s, waiting %s seconds...', process_count_max, data['jid'], process_count_max_sleep_secs) yield tornado.gen.sleep(process_count_max_sleep_secs) process_count = len(salt.utils.minion.running(self.opts)) # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. instance = self multiprocessing_enabled = self.opts.get('multiprocessing', True) if multiprocessing_enabled: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None with default_signals(signal.SIGINT, signal.SIGTERM): process = SignalHandlingMultiprocessingProcess( target=self._target, args=(instance, self.opts, data, self.connected) ) else: process = threading.Thread( target=self._target, args=(instance, self.opts, data, self.connected), name=data['jid'] ) if multiprocessing_enabled: with default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers process.start() else: process.start() # TODO: remove the windows specific check? 
if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading 
target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. 
Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
try: func_result = all(return_data.get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret['retcode'] = retcode ret['success'] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = 'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( 'A command in \'%s\' had a problem: %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( 'Problem executing \'%s\': %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing \'{0}\': {1}'.format( function_name, exc ) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format( function_name, exc, func.__doc__ or '' ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string(function_name) ret['return'] = docs else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = 
function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: \'{0}\''.format( minion_instance.function_errors[mod_name] ) ret['success'] = False ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. Ignoring.') if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) # Add default returners from minion config # Should have been coverted to comma-delimited string already if isinstance(opts.get('return'), six.string_types): if data['ret']: data['ret'] = ','.join((data['ret'], opts['return'])) else: data['ret'] = opts['return'] log.debug('minion return: %s', ret) # TODO: make a list? Seems odd to split it this late :/ if data['ret'] and isinstance(data['ret'], six.string_types): if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: returner_str = '{0}.returner'.format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string(returner_str) log.error( 'Returner %s could not be loaded: %s', returner_str, returner_err ) except Exception as exc: log.exception( 'The return failed for job %s: %s', data['jid'], exc ) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. 
''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID %s', sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) multifunc_ordered = opts.get('multifunc_ordered', False) num_funcs = len(data['fun']) if multifunc_ordered: ret = { 'return': [None] * num_funcs, 'retcode': [None] * num_funcs, 'success': [False] * num_funcs } else: ret = { 'return': {}, 'retcode': {}, 'success': {} } for ind in range(0, num_funcs): if not multifunc_ordered: ret['success'][data['fun'][ind]] = False try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True elif minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. 
Only ' 'saltutil.refresh_pillar allowed in blackout mode.') func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) minion_instance.functions.pack['__context__']['retcode'] = 0 key = ind if multifunc_ordered else data['fun'][ind] ret['return'][key] = func(*args, **kwargs) retcode = minion_instance.functions.pack['__context__'].get( 'retcode', 0 ) if retcode == 0: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. try: func_result = all(ret['return'][key].get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = 1 ret['retcode'][key] = retcode ret['success'][key] = retcode == 0 except Exception as exc: trb = traceback.format_exc() log.warning('The minion function caused an exception: %s', exc) if multifunc_ordered: ret['return'][ind] = trb else: ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job %s: %s', data['jid'], exc ) def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file 
is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled if ret['jid'] == 'req': ret['jid'] = salt.utils.jid.gen_jid(self.opts) salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) if not self.opts['pub_ret']: return '' def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. 
If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' if not isinstance(rets, list): rets = [rets] jids = {} for ret in rets: jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) load = jids.setdefault(jid, {}) if ret_cmd == '_syndic_return': if not load: load.update({'id': self.opts['id'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__'), 'return': {}}) if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load.update({'id': self.opts['id']}) for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. 
This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled salt.utils.minion.cache_jobs(self.opts, load['jid'], ret) load = {'cmd': ret_cmd, 'load': list(six.itervalues(jids))} def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' 
                )
            else:
                # Build a synthetic job ('req' jid) and run it through the
                # normal job-dispatch path.
                data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
                if self.opts['startup_states'] == 'sls':
                    data['fun'] = 'state.sls'
                    data['arg'] = [self.opts['sls_list']]
                elif self.opts['startup_states'] == 'top':
                    data['fun'] = 'state.top'
                    data['arg'] = [self.opts['top_file']]
                else:
                    # Any other value falls back to a full highstate.
                    data['fun'] = 'state.highstate'
                    data['arg'] = []
                self._handle_decoded_payload(data)

    def _refresh_grains_watcher(self, refresh_interval_in_minutes):
        '''
        Create a loop that will fire a pillar refresh to inform a master about
        a change in the grains of this minion

        :param refresh_interval_in_minutes: how often (in minutes) the
            scheduled ``event.fire`` job emits the ``grains_refresh`` event
        :return: None
        '''
        # Only install the job once; respect an operator-defined schedule.
        if '__update_grains' not in self.opts.get('schedule', {}):
            if 'schedule' not in self.opts:
                self.opts['schedule'] = {}
            self.opts['schedule'].update({
                '__update_grains':
                {
                    'function': 'event.fire',
                    'args': [{}, 'grains_refresh'],
                    'minutes': refresh_interval_in_minutes
                }
            })

    def _fire_master_minion_start(self):
        # Send an event to the master that the minion is live
        if self.opts['enable_legacy_startup_events']:
            # Old style event. Defaults to False in Sodium release.
            self._fire_master(
                'Minion {0} started at {1}'.format(
                    self.opts['id'],
                    time.asctime()
                ),
                'minion_start'
            )
        # send name spaced event
        self._fire_master(
            'Minion {0} started at {1}'.format(
                self.opts['id'],
                time.asctime()
            ),
            tagify([self.opts['id'], 'start'], 'minion'),
        )

    def module_refresh(self, force_refresh=False, notify=False):
        '''
        Refresh the functions and returners.

        :param force_refresh: passed through to the loader to force a reload
        :param notify: passed through to the loader; also logged here
        '''
        log.debug('Refreshing modules. Notify=%s', notify)
        # function_errors (third element) is intentionally discarded here.
        self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify)
        # Keep the scheduler's loader references in sync with ours.
        self.schedule.functions = self.functions
        self.schedule.returners = self.returners

    def beacons_refresh(self):
        '''
        Rebuild the Beacon instance from the current opts and functions.
        '''
        log.debug('Refreshing beacons.')
        self.beacons = salt.beacons.Beacon(self.opts, self.functions)

    def matchers_refresh(self):
        '''
        Refresh the matchers by reloading them from the loader.
        '''
        log.debug('Refreshing matchers.')
        self.matchers = salt.loader.matchers(self.opts)

    # TODO: only allow one future in flight at a time?
    @tornado.gen.coroutine
    def pillar_refresh(self, force_refresh=False, notify=False):
        '''
        Refresh the pillar, then refresh modules, matchers and beacons so
        they pick up the new pillar data.

        :param force_refresh: forwarded to :meth:`module_refresh`
        :param notify: when True, fire a MINION_PILLAR_COMPLETE event on the
            local minion event bus after a successful compile
        '''
        if self.connected:
            log.debug('Refreshing pillar. Notify: %s', notify)
            async_pillar = salt.pillar.get_async_pillar(
                self.opts,
                self.opts['grains'],
                self.opts['id'],
                self.opts['saltenv'],
                pillarenv=self.opts.get('pillarenv'),
            )
            try:
                self.opts['pillar'] = yield async_pillar.compile_pillar()
                if notify:
                    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
                    evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE)
            except SaltClientError:
                # Do not exit if a pillar refresh fails.
                log.error('Pillar data could not be refreshed. '
                          'One or more masters may be down!')
            finally:
                async_pillar.destroy()
        # The dependent refreshes run even when disconnected (stale pillar).
        self.module_refresh(force_refresh, notify)
        self.matchers_refresh()
        self.beacons_refresh()

    def manage_schedule(self, tag, data):
        '''
        Dispatch a manage_schedule event to the matching Schedule method.
''' func = data.get('func', None) name = data.get('name', None) schedule = data.get('schedule', None) where = data.get('where', None) persist = data.get('persist', None) funcs = {'delete': ('delete_job', (name, persist)), 'add': ('add_job', (schedule, persist)), 'modify': ('modify_job', (name, schedule, persist)), 'enable': ('enable_schedule', ()), 'disable': ('disable_schedule', ()), 'enable_job': ('enable_job', (name, persist)), 'disable_job': ('disable_job', (name, persist)), 'postpone_job': ('postpone_job', (name, data)), 'skip_job': ('skip_job', (name, data)), 'reload': ('reload', (schedule,)), 'list': ('list', (where,)), 'save_schedule': ('save_schedule', ()), 'get_next_fire_time': ('get_next_fire_time', (name,))} # Call the appropriate schedule function try: alias, params = funcs.get(func) getattr(self.schedule, alias)(*params) except TypeError: log.error('Function "%s" is unavailable in salt.utils.scheduler', func) def manage_beacons(self, tag, data): ''' Manage Beacons ''' func = data.get('func', None) name = data.get('name', None) beacon_data = data.get('beacon_data', None) include_pillar = data.get('include_pillar', None) include_opts = data.get('include_opts', None) funcs = {'add': ('add_beacon', (name, beacon_data)), 'modify': ('modify_beacon', (name, beacon_data)), 'delete': ('delete_beacon', (name,)), 'enable': ('enable_beacons', ()), 'disable': ('disable_beacons', ()), 'enable_beacon': ('enable_beacon', (name,)), 'disable_beacon': ('disable_beacon', (name,)), 'list': ('list_beacons', (include_opts, include_pillar)), 'list_available': ('list_available_beacons', ()), 'validate_beacon': ('validate_beacon', (name, beacon_data)), 'reset': ('reset', ())} # Call the appropriate beacon function try: alias, params = funcs.get(func) getattr(self.beacons, alias)(*params) except AttributeError: log.error('Function "%s" is unavailable in salt.beacons', func) except TypeError as exc: log.info( 'Failed to handle %s with data(%s). 
Error: %s', tag, data, exc, exc_info_on_loglevel=logging.DEBUG ) def environ_setenv(self, tag, data): ''' Set the salt-minion main process environment according to the data contained in the minion event data ''' environ = data.get('environ', None) if environ is None: return False false_unsets = data.get('false_unsets', False) clear_all = data.get('clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all) def _pre_tune(self): ''' Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running ''' if self._running is None: self._running = True elif self._running is False: log.error( 'This %s was scheduled to stop. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return elif self._running is True: log.error( 'This %s is already running. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return try: log.info( '%s is starting as user \'%s\'', self.__class__.__name__, salt.utils.user.get_user() ) except Exception as err: # Only windows is allowed to fail here. See #3189. Log as debug in # that case. Else, error. 
log.log( salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR, 'Failed to get the user who is starting %s', self.__class__.__name__, exc_info=err ) def _handle_tag_module_refresh(self, tag, data): ''' Handle a module_refresh event ''' self.module_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) @tornado.gen.coroutine def _handle_tag_pillar_refresh(self, tag, data): ''' Handle a pillar_refresh event ''' yield self.pillar_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) def _handle_tag_beacons_refresh(self, tag, data): ''' Handle a beacon_refresh event ''' self.beacons_refresh() def _handle_tag_matchers_refresh(self, tag, data): ''' Handle a matchers_refresh event ''' self.matchers_refresh() def _handle_tag_manage_schedule(self, tag, data): ''' Handle a manage_schedule event ''' self.manage_schedule(tag, data) def _handle_tag_manage_beacons(self, tag, data): ''' Handle a manage_beacons event ''' self.manage_beacons(tag, data) def _handle_tag_grains_refresh(self, tag, data): ''' Handle a grains_refresh event ''' if (data.get('force_refresh', False) or self.grains_cache != self.opts['grains']): self.pillar_refresh(force_refresh=True) self.grains_cache = self.opts['grains'] def _handle_tag_environ_setenv(self, tag, data): ''' Handle a environ_setenv event ''' self.environ_setenv(tag, data) def _handle_tag_minion_mine(self, tag, data): ''' Handle a _minion_mine event ''' self._mine_send(tag, data) def _handle_tag_fire_master(self, tag, data): ''' Handle a fire_master event ''' if self.connected: log.debug('Forwarding master event tag=%s', data['tag']) self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) def _handle_tag_master_disconnected_failback(self, tag, data): ''' Handle a master_disconnected_failback event ''' # if the master disconnect event is for a different master, raise an exception if tag.startswith(master_event(type='disconnected')) 
and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication try: 
master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = True self.io_loop.stop() def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. 
otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) def _handle_tag_salt_auth_creds(self, tag, data): ''' Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def handle_event(self, package): ''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = 
salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. ''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion._handle_tag_module_refresh
python
def _handle_tag_module_refresh(self, tag, data): ''' Handle a module_refresh event ''' self.module_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) )
Handle a module_refresh event
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2395-L2402
null
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, 
self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' 
mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! 
if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) executors = salt.loader.executors(opts, functions, proxy=proxy) if opt_in: self.opts = opts return functions, returners, errors, executors def _send_req_sync(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.ReqChannel.factory(self.opts) try: return channel.send(load, timeout=timeout) finally: channel.close() @tornado.gen.coroutine def _send_req_async(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.AsyncReqChannel.factory(self.opts) try: ret = yield channel.send(load, timeout=timeout) raise tornado.gen.Return(ret) finally: channel.close() def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. ''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return if sync: try: self._send_req_sync(load, timeout) except salt.exceptions.SaltReqTimeoutError: log.info('fire_master failed: master could not be contacted. 
Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return False except Exception: log.info('fire_master failed: %s', traceback.format_exc()) return False else: if timeout_handler is None: def handle_timeout(*_): log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return True timeout_handler = handle_timeout with tornado.stack_context.ExceptionStackContext(timeout_handler): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # Ensure payload is unicode. Disregard failure to decode binary blobs. if six.PY2: data = salt.utils.data.decode(data, keep=True) if 'user' in data: log.info( 'User %s Executing command %s with jid %s', data['user'], data['fun'], data['jid'] ) else: log.info( 'Executing command %s with jid %s', data['fun'], data['jid'] ) log.debug('Command details %s', data) # Don't duplicate jobs log.trace('Started JIDs: %s', self.jid_queue) if self.jid_queue is not None: if data['jid'] in self.jid_queue: return else: self.jid_queue.append(data['jid']) if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']: self.jid_queue.pop(0) if isinstance(data['fun'], six.string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners process_count_max = self.opts.get('process_count_max') process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs') if process_count_max > 0: process_count = len(salt.utils.minion.running(self.opts)) while 
process_count >= process_count_max: log.warning('Maximum number of processes (%s) reached while ' 'executing jid %s, waiting %s seconds...', process_count_max, data['jid'], process_count_max_sleep_secs) yield tornado.gen.sleep(process_count_max_sleep_secs) process_count = len(salt.utils.minion.running(self.opts)) # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. instance = self multiprocessing_enabled = self.opts.get('multiprocessing', True) if multiprocessing_enabled: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None with default_signals(signal.SIGINT, signal.SIGTERM): process = SignalHandlingMultiprocessingProcess( target=self._target, args=(instance, self.opts, data, self.connected) ) else: process = threading.Thread( target=self._target, args=(instance, self.opts, data, self.connected), name=data['jid'] ) if multiprocessing_enabled: with default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers process.start() else: process.start() # TODO: remove the windows specific check? 
if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading 
target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. 
Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
try: func_result = all(return_data.get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret['retcode'] = retcode ret['success'] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = 'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( 'A command in \'%s\' had a problem: %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( 'Problem executing \'%s\': %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing \'{0}\': {1}'.format( function_name, exc ) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format( function_name, exc, func.__doc__ or '' ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string(function_name) ret['return'] = docs else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = 
function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: \'{0}\''.format( minion_instance.function_errors[mod_name] ) ret['success'] = False ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. Ignoring.') if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) # Add default returners from minion config # Should have been coverted to comma-delimited string already if isinstance(opts.get('return'), six.string_types): if data['ret']: data['ret'] = ','.join((data['ret'], opts['return'])) else: data['ret'] = opts['return'] log.debug('minion return: %s', ret) # TODO: make a list? Seems odd to split it this late :/ if data['ret'] and isinstance(data['ret'], six.string_types): if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: returner_str = '{0}.returner'.format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string(returner_str) log.error( 'Returner %s could not be loaded: %s', returner_str, returner_err ) except Exception as exc: log.exception( 'The return failed for job %s: %s', data['jid'], exc ) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. 
''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID %s', sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) multifunc_ordered = opts.get('multifunc_ordered', False) num_funcs = len(data['fun']) if multifunc_ordered: ret = { 'return': [None] * num_funcs, 'retcode': [None] * num_funcs, 'success': [False] * num_funcs } else: ret = { 'return': {}, 'retcode': {}, 'success': {} } for ind in range(0, num_funcs): if not multifunc_ordered: ret['success'][data['fun'][ind]] = False try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True elif minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. 
Only ' 'saltutil.refresh_pillar allowed in blackout mode.') func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) minion_instance.functions.pack['__context__']['retcode'] = 0 key = ind if multifunc_ordered else data['fun'][ind] ret['return'][key] = func(*args, **kwargs) retcode = minion_instance.functions.pack['__context__'].get( 'retcode', 0 ) if retcode == 0: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. try: func_result = all(ret['return'][key].get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = 1 ret['retcode'][key] = retcode ret['success'][key] = retcode == 0 except Exception as exc: trb = traceback.format_exc() log.warning('The minion function caused an exception: %s', exc) if multifunc_ordered: ret['return'][ind] = trb else: ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job %s: %s', data['jid'], exc ) def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file 
is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled if ret['jid'] == 'req': ret['jid'] = salt.utils.jid.gen_jid(self.opts) salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) if not self.opts['pub_ret']: return '' def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. 
If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' if not isinstance(rets, list): rets = [rets] jids = {} for ret in rets: jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) load = jids.setdefault(jid, {}) if ret_cmd == '_syndic_return': if not load: load.update({'id': self.opts['id'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__'), 'return': {}}) if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load.update({'id': self.opts['id']}) for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. 
This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled salt.utils.minion.cache_jobs(self.opts, load['jid'], ret) load = {'cmd': ret_cmd, 'load': list(six.itervalues(jids))} def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' 
            )
        else:
            # Build a synthetic job payload and feed it through the normal
            # job-handling path, as if the master had published it.
            data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
            if self.opts['startup_states'] == 'sls':
                data['fun'] = 'state.sls'
                data['arg'] = [self.opts['sls_list']]
            elif self.opts['startup_states'] == 'top':
                data['fun'] = 'state.top'
                data['arg'] = [self.opts['top_file']]
            else:
                # any other truthy value means a full highstate run
                data['fun'] = 'state.highstate'
                data['arg'] = []
            self._handle_decoded_payload(data)

    def _refresh_grains_watcher(self, refresh_interval_in_minutes):
        '''
        Create a loop that will fire a pillar refresh to inform a master about
        a change in the grains of this minion

        :param refresh_interval_in_minutes: how often the grains_refresh event
            is fired (minutes)
        :return: None
        '''
        # Only install the job once; subsequent calls are no-ops.
        if '__update_grains' not in self.opts.get('schedule', {}):
            if 'schedule' not in self.opts:
                self.opts['schedule'] = {}
            self.opts['schedule'].update({
                '__update_grains': {
                    'function': 'event.fire',
                    'args': [{}, 'grains_refresh'],
                    'minutes': refresh_interval_in_minutes
                }
            })

    def _fire_master_minion_start(self):
        # Send an event to the master that the minion is live
        if self.opts['enable_legacy_startup_events']:
            # Old style event. Defaults to False in Sodium release.
            self._fire_master(
                'Minion {0} started at {1}'.format(
                    self.opts['id'],
                    time.asctime()
                ),
                'minion_start'
            )
        # send name spaced event
        self._fire_master(
            'Minion {0} started at {1}'.format(
                self.opts['id'],
                time.asctime()
            ),
            tagify([self.opts['id'], 'start'], 'minion'),
        )

    def module_refresh(self, force_refresh=False, notify=False):
        '''
        Refresh the functions and returners.
        '''
        log.debug('Refreshing modules. Notify=%s', notify)
        self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify)
        # Keep the scheduler pointed at the freshly loaded loaders so
        # scheduled jobs pick up refreshed modules immediately.
        self.schedule.functions = self.functions
        self.schedule.returners = self.returners

    def beacons_refresh(self):
        '''
        Refresh the functions and returners.
        '''
        log.debug('Refreshing beacons.')
        self.beacons = salt.beacons.Beacon(self.opts, self.functions)

    def matchers_refresh(self):
        '''
        Refresh the matchers
        '''
        log.debug('Refreshing matchers.')
        self.matchers = salt.loader.matchers(self.opts)

    # TODO: only allow one future in flight at a time?
    @tornado.gen.coroutine
    def pillar_refresh(self, force_refresh=False, notify=False):
        '''
        Refresh the pillar

        :param force_refresh: forwarded to module_refresh after the pillar
            compile
        :param notify: if True, fire a MINION_PILLAR_COMPLETE event on success
        '''
        if self.connected:
            log.debug('Refreshing pillar. Notify: %s', notify)
            async_pillar = salt.pillar.get_async_pillar(
                self.opts,
                self.opts['grains'],
                self.opts['id'],
                self.opts['saltenv'],
                pillarenv=self.opts.get('pillarenv'),
            )
            try:
                self.opts['pillar'] = yield async_pillar.compile_pillar()
                if notify:
                    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
                    evt.fire_event({'complete': True},
                                   tag=salt.defaults.events.MINION_PILLAR_COMPLETE)
            except SaltClientError:
                # Do not exit if a pillar refresh fails.
                log.error('Pillar data could not be refreshed. '
                          'One or more masters may be down!')
            finally:
                async_pillar.destroy()
        # Modules/matchers/beacons may render differently against the new
        # pillar, so refresh them even if the compile above was skipped.
        self.module_refresh(force_refresh, notify)
        self.matchers_refresh()
        self.beacons_refresh()

    def manage_schedule(self, tag, data):
        '''
        Refresh the functions and returners.
        '''
        func = data.get('func', None)
        name = data.get('name', None)
        schedule = data.get('schedule', None)
        where = data.get('where', None)
        persist = data.get('persist', None)

        # Dispatch table: requested action -> (Schedule method name, args)
        funcs = {'delete': ('delete_job', (name, persist)),
                 'add': ('add_job', (schedule, persist)),
                 'modify': ('modify_job', (name, schedule, persist)),
                 'enable': ('enable_schedule', ()),
                 'disable': ('disable_schedule', ()),
                 'enable_job': ('enable_job', (name, persist)),
                 'disable_job': ('disable_job', (name, persist)),
                 'postpone_job': ('postpone_job', (name, data)),
                 'skip_job': ('skip_job', (name, data)),
                 'reload': ('reload', (schedule,)),
                 'list': ('list', (where,)),
                 'save_schedule': ('save_schedule', ()),
                 'get_next_fire_time': ('get_next_fire_time', (name,))}

        # Call the appropriate schedule function
        try:
            # funcs.get(func) is None for an unknown action; the unpack then
            # raises TypeError, which is reported below.
            alias, params = funcs.get(func)
            getattr(self.schedule, alias)(*params)
        except TypeError:
            log.error('Function "%s" is unavailable in salt.utils.scheduler', func)

    def manage_beacons(self, tag, data):
        '''
        Manage Beacons
        '''
        func = data.get('func', None)
        name = data.get('name', None)
        beacon_data = data.get('beacon_data', None)
        include_pillar = data.get('include_pillar', None)
        include_opts = data.get('include_opts', None)

        # Dispatch table: requested action -> (Beacon method name, args)
        funcs = {'add': ('add_beacon', (name, beacon_data)),
                 'modify': ('modify_beacon', (name, beacon_data)),
                 'delete': ('delete_beacon', (name,)),
                 'enable': ('enable_beacons', ()),
                 'disable': ('disable_beacons', ()),
                 'enable_beacon': ('enable_beacon', (name,)),
                 'disable_beacon': ('disable_beacon', (name,)),
                 'list': ('list_beacons', (include_opts, include_pillar)),
                 'list_available': ('list_available_beacons', ()),
                 'validate_beacon': ('validate_beacon', (name, beacon_data)),
                 'reset': ('reset', ())}

        # Call the appropriate beacon function
        try:
            alias, params = funcs.get(func)
            getattr(self.beacons, alias)(*params)
        except AttributeError:
            log.error('Function "%s" is unavailable in salt.beacons', func)
        except TypeError as exc:
            log.info(
                'Failed to handle %s with data(%s). Error: %s',
                tag, data, exc,
                exc_info_on_loglevel=logging.DEBUG
            )

    def environ_setenv(self, tag, data):
        '''
        Set the salt-minion main process environment according to
        the data contained in the minion event data
        '''
        environ = data.get('environ', None)
        if environ is None:
            return False
        false_unsets = data.get('false_unsets', False)
        clear_all = data.get('clear_all', False)
        # local import keeps module load cheap; only needed on this path
        import salt.modules.environ as mod_environ
        return mod_environ.setenv(environ, false_unsets, clear_all)

    def _pre_tune(self):
        '''
        Set the minion running flag and issue the appropriate warnings if
        the minion cannot be started or is already running
        '''
        if self._running is None:
            self._running = True
        elif self._running is False:
            log.error(
                'This %s was scheduled to stop. Not running %s.tune_in()',
                self.__class__.__name__, self.__class__.__name__
            )
            return
        elif self._running is True:
            log.error(
                'This %s is already running. Not running %s.tune_in()',
                self.__class__.__name__, self.__class__.__name__
            )
            return

        try:
            log.info(
                '%s is starting as user \'%s\'',
                self.__class__.__name__, salt.utils.user.get_user()
            )
        except Exception as err:
            # Only windows is allowed to fail here. See #3189. Log as debug in
            # that case. Else, error.
            log.log(
                salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR,
                'Failed to get the user who is starting %s',
                self.__class__.__name__,
                exc_info=err
            )

    def _mine_send(self, tag, data):
        '''
        Send mine data to the master
        '''
        channel = salt.transport.client.ReqChannel.factory(self.opts)
        data['tok'] = self.tok
        try:
            ret = channel.send(data)
            return ret
        except SaltReqTimeoutError:
            log.warning('Unable to send mine data to master.')
            return None
        finally:
            # always release the request channel, even on timeout
            channel.close()

    @tornado.gen.coroutine
    def _handle_tag_pillar_refresh(self, tag, data):
        '''
        Handle a pillar_refresh event
        '''
        yield self.pillar_refresh(
            force_refresh=data.get('force_refresh', False),
            notify=data.get('notify', False)
        )

    def _handle_tag_beacons_refresh(self, tag, data):
        '''
        Handle a beacon_refresh event
        '''
        self.beacons_refresh()

    def _handle_tag_matchers_refresh(self, tag, data):
        '''
        Handle a matchers_refresh event
        '''
        self.matchers_refresh()

    def _handle_tag_manage_schedule(self, tag, data):
        '''
        Handle a manage_schedule event
        '''
        self.manage_schedule(tag, data)

    def _handle_tag_manage_beacons(self, tag, data):
        '''
        Handle a manage_beacons event
        '''
        self.manage_beacons(tag, data)

    def _handle_tag_grains_refresh(self, tag, data):
        '''
        Handle a grains_refresh event
        '''
        # Only refresh when forced or when the grains actually changed
        if (data.get('force_refresh', False) or
                self.grains_cache != self.opts['grains']):
            self.pillar_refresh(force_refresh=True)
            self.grains_cache = self.opts['grains']

    def _handle_tag_environ_setenv(self, tag, data):
        '''
        Handle a environ_setenv event
        '''
        self.environ_setenv(tag, data)

    def _handle_tag_minion_mine(self, tag, data):
        '''
        Handle a _minion_mine event
        '''
        self._mine_send(tag, data)

    def _handle_tag_fire_master(self, tag, data):
        '''
        Handle a fire_master event
        '''
        if self.connected:
            log.debug('Forwarding master event tag=%s', data['tag'])
            self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])

    def _handle_tag_master_disconnected_failback(self, tag, data):
        '''
        Handle a master_disconnected_failback event
        '''
        # if the
master disconnect event is for a different master, raise an exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel 
# if eval_master finds a new master for us, self.connected # will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = True self.io_loop.stop() def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. 
otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) def _handle_tag_salt_auth_creds(self, tag, data): ''' Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def handle_event(self, package): ''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = 
salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. ''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion._handle_tag_pillar_refresh
python
def _handle_tag_pillar_refresh(self, tag, data): ''' Handle a pillar_refresh event ''' yield self.pillar_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) )
Handle a pillar_refresh event
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2405-L2412
null
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, 
self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' 
mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! 
if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) executors = salt.loader.executors(opts, functions, proxy=proxy) if opt_in: self.opts = opts return functions, returners, errors, executors def _send_req_sync(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.ReqChannel.factory(self.opts) try: return channel.send(load, timeout=timeout) finally: channel.close() @tornado.gen.coroutine def _send_req_async(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.AsyncReqChannel.factory(self.opts) try: ret = yield channel.send(load, timeout=timeout) raise tornado.gen.Return(ret) finally: channel.close() def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. ''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return if sync: try: self._send_req_sync(load, timeout) except salt.exceptions.SaltReqTimeoutError: log.info('fire_master failed: master could not be contacted. 
Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return False except Exception: log.info('fire_master failed: %s', traceback.format_exc()) return False else: if timeout_handler is None: def handle_timeout(*_): log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return True timeout_handler = handle_timeout with tornado.stack_context.ExceptionStackContext(timeout_handler): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # Ensure payload is unicode. Disregard failure to decode binary blobs. if six.PY2: data = salt.utils.data.decode(data, keep=True) if 'user' in data: log.info( 'User %s Executing command %s with jid %s', data['user'], data['fun'], data['jid'] ) else: log.info( 'Executing command %s with jid %s', data['fun'], data['jid'] ) log.debug('Command details %s', data) # Don't duplicate jobs log.trace('Started JIDs: %s', self.jid_queue) if self.jid_queue is not None: if data['jid'] in self.jid_queue: return else: self.jid_queue.append(data['jid']) if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']: self.jid_queue.pop(0) if isinstance(data['fun'], six.string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners process_count_max = self.opts.get('process_count_max') process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs') if process_count_max > 0: process_count = len(salt.utils.minion.running(self.opts)) while 
process_count >= process_count_max: log.warning('Maximum number of processes (%s) reached while ' 'executing jid %s, waiting %s seconds...', process_count_max, data['jid'], process_count_max_sleep_secs) yield tornado.gen.sleep(process_count_max_sleep_secs) process_count = len(salt.utils.minion.running(self.opts)) # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. instance = self multiprocessing_enabled = self.opts.get('multiprocessing', True) if multiprocessing_enabled: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None with default_signals(signal.SIGINT, signal.SIGTERM): process = SignalHandlingMultiprocessingProcess( target=self._target, args=(instance, self.opts, data, self.connected) ) else: process = threading.Thread( target=self._target, args=(instance, self.opts, data, self.connected), name=data['jid'] ) if multiprocessing_enabled: with default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers process.start() else: process.start() # TODO: remove the windows specific check? 
if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading 
target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. 
Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
try: func_result = all(return_data.get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret['retcode'] = retcode ret['success'] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = 'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( 'A command in \'%s\' had a problem: %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( 'Problem executing \'%s\': %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing \'{0}\': {1}'.format( function_name, exc ) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format( function_name, exc, func.__doc__ or '' ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string(function_name) ret['return'] = docs else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = 
function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: \'{0}\''.format( minion_instance.function_errors[mod_name] ) ret['success'] = False ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. Ignoring.') if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) # Add default returners from minion config # Should have been coverted to comma-delimited string already if isinstance(opts.get('return'), six.string_types): if data['ret']: data['ret'] = ','.join((data['ret'], opts['return'])) else: data['ret'] = opts['return'] log.debug('minion return: %s', ret) # TODO: make a list? Seems odd to split it this late :/ if data['ret'] and isinstance(data['ret'], six.string_types): if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: returner_str = '{0}.returner'.format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string(returner_str) log.error( 'Returner %s could not be loaded: %s', returner_str, returner_err ) except Exception as exc: log.exception( 'The return failed for job %s: %s', data['jid'], exc ) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. 
''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID %s', sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) multifunc_ordered = opts.get('multifunc_ordered', False) num_funcs = len(data['fun']) if multifunc_ordered: ret = { 'return': [None] * num_funcs, 'retcode': [None] * num_funcs, 'success': [False] * num_funcs } else: ret = { 'return': {}, 'retcode': {}, 'success': {} } for ind in range(0, num_funcs): if not multifunc_ordered: ret['success'][data['fun'][ind]] = False try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True elif minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. 
Only ' 'saltutil.refresh_pillar allowed in blackout mode.') func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) minion_instance.functions.pack['__context__']['retcode'] = 0 key = ind if multifunc_ordered else data['fun'][ind] ret['return'][key] = func(*args, **kwargs) retcode = minion_instance.functions.pack['__context__'].get( 'retcode', 0 ) if retcode == 0: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. try: func_result = all(ret['return'][key].get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = 1 ret['retcode'][key] = retcode ret['success'][key] = retcode == 0 except Exception as exc: trb = traceback.format_exc() log.warning('The minion function caused an exception: %s', exc) if multifunc_ordered: ret['return'][ind] = trb else: ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job %s: %s', data['jid'], exc ) def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file 
is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled if ret['jid'] == 'req': ret['jid'] = salt.utils.jid.gen_jid(self.opts) salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) if not self.opts['pub_ret']: return '' def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. 
If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' if not isinstance(rets, list): rets = [rets] jids = {} for ret in rets: jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) load = jids.setdefault(jid, {}) if ret_cmd == '_syndic_return': if not load: load.update({'id': self.opts['id'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__'), 'return': {}}) if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load.update({'id': self.opts['id']}) for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. 
This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled salt.utils.minion.cache_jobs(self.opts, load['jid'], ret) load = {'cmd': ret_cmd, 'load': list(six.itervalues(jids))} def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' 
) else: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data) def _refresh_grains_watcher(self, refresh_interval_in_minutes): ''' Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: :return: None ''' if '__update_grains' not in self.opts.get('schedule', {}): if 'schedule' not in self.opts: self.opts['schedule'] = {} self.opts['schedule'].update({ '__update_grains': { 'function': 'event.fire', 'args': [{}, 'grains_refresh'], 'minutes': refresh_interval_in_minutes } }) def _fire_master_minion_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to False in Sodium release. self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'minion_start' ) # send name spaced event self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'minion'), ) def module_refresh(self, force_refresh=False, notify=False): ''' Refresh the functions and returners. ''' log.debug('Refreshing modules. Notify=%s', notify) self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify) self.schedule.functions = self.functions self.schedule.returners = self.returners def beacons_refresh(self): ''' Refresh the functions and returners. 
''' log.debug('Refreshing beacons.') self.beacons = salt.beacons.Beacon(self.opts, self.functions) def matchers_refresh(self): ''' Refresh the matchers ''' log.debug('Refreshing matchers.') self.matchers = salt.loader.matchers(self.opts) # TODO: only allow one future in flight at a time? @tornado.gen.coroutine def pillar_refresh(self, force_refresh=False, notify=False): ''' Refresh the pillar ''' if self.connected: log.debug('Refreshing pillar. Notify: %s', notify) async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ) try: self.opts['pillar'] = yield async_pillar.compile_pillar() if notify: evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE) except SaltClientError: # Do not exit if a pillar refresh fails. log.error('Pillar data could not be refreshed. ' 'One or more masters may be down!') finally: async_pillar.destroy() self.module_refresh(force_refresh, notify) self.matchers_refresh() self.beacons_refresh() def manage_schedule(self, tag, data): ''' Refresh the functions and returners. 
''' func = data.get('func', None) name = data.get('name', None) schedule = data.get('schedule', None) where = data.get('where', None) persist = data.get('persist', None) funcs = {'delete': ('delete_job', (name, persist)), 'add': ('add_job', (schedule, persist)), 'modify': ('modify_job', (name, schedule, persist)), 'enable': ('enable_schedule', ()), 'disable': ('disable_schedule', ()), 'enable_job': ('enable_job', (name, persist)), 'disable_job': ('disable_job', (name, persist)), 'postpone_job': ('postpone_job', (name, data)), 'skip_job': ('skip_job', (name, data)), 'reload': ('reload', (schedule,)), 'list': ('list', (where,)), 'save_schedule': ('save_schedule', ()), 'get_next_fire_time': ('get_next_fire_time', (name,))} # Call the appropriate schedule function try: alias, params = funcs.get(func) getattr(self.schedule, alias)(*params) except TypeError: log.error('Function "%s" is unavailable in salt.utils.scheduler', func) def manage_beacons(self, tag, data): ''' Manage Beacons ''' func = data.get('func', None) name = data.get('name', None) beacon_data = data.get('beacon_data', None) include_pillar = data.get('include_pillar', None) include_opts = data.get('include_opts', None) funcs = {'add': ('add_beacon', (name, beacon_data)), 'modify': ('modify_beacon', (name, beacon_data)), 'delete': ('delete_beacon', (name,)), 'enable': ('enable_beacons', ()), 'disable': ('disable_beacons', ()), 'enable_beacon': ('enable_beacon', (name,)), 'disable_beacon': ('disable_beacon', (name,)), 'list': ('list_beacons', (include_opts, include_pillar)), 'list_available': ('list_available_beacons', ()), 'validate_beacon': ('validate_beacon', (name, beacon_data)), 'reset': ('reset', ())} # Call the appropriate beacon function try: alias, params = funcs.get(func) getattr(self.beacons, alias)(*params) except AttributeError: log.error('Function "%s" is unavailable in salt.beacons', func) except TypeError as exc: log.info( 'Failed to handle %s with data(%s). 
Error: %s', tag, data, exc, exc_info_on_loglevel=logging.DEBUG ) def environ_setenv(self, tag, data): ''' Set the salt-minion main process environment according to the data contained in the minion event data ''' environ = data.get('environ', None) if environ is None: return False false_unsets = data.get('false_unsets', False) clear_all = data.get('clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all) def _pre_tune(self): ''' Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running ''' if self._running is None: self._running = True elif self._running is False: log.error( 'This %s was scheduled to stop. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return elif self._running is True: log.error( 'This %s is already running. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return try: log.info( '%s is starting as user \'%s\'', self.__class__.__name__, salt.utils.user.get_user() ) except Exception as err: # Only windows is allowed to fail here. See #3189. Log as debug in # that case. Else, error. 
log.log( salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR, 'Failed to get the user who is starting %s', self.__class__.__name__, exc_info=err ) def _mine_send(self, tag, data): ''' Send mine data to the master ''' channel = salt.transport.client.ReqChannel.factory(self.opts) data['tok'] = self.tok try: ret = channel.send(data) return ret except SaltReqTimeoutError: log.warning('Unable to send mine data to master.') return None finally: channel.close() def _handle_tag_module_refresh(self, tag, data): ''' Handle a module_refresh event ''' self.module_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) @tornado.gen.coroutine def _handle_tag_beacons_refresh(self, tag, data): ''' Handle a beacon_refresh event ''' self.beacons_refresh() def _handle_tag_matchers_refresh(self, tag, data): ''' Handle a matchers_refresh event ''' self.matchers_refresh() def _handle_tag_manage_schedule(self, tag, data): ''' Handle a manage_schedule event ''' self.manage_schedule(tag, data) def _handle_tag_manage_beacons(self, tag, data): ''' Handle a manage_beacons event ''' self.manage_beacons(tag, data) def _handle_tag_grains_refresh(self, tag, data): ''' Handle a grains_refresh event ''' if (data.get('force_refresh', False) or self.grains_cache != self.opts['grains']): self.pillar_refresh(force_refresh=True) self.grains_cache = self.opts['grains'] def _handle_tag_environ_setenv(self, tag, data): ''' Handle a environ_setenv event ''' self.environ_setenv(tag, data) def _handle_tag_minion_mine(self, tag, data): ''' Handle a _minion_mine event ''' self._mine_send(tag, data) def _handle_tag_fire_master(self, tag, data): ''' Handle a fire_master event ''' if self.connected: log.debug('Forwarding master event tag=%s', data['tag']) self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) def _handle_tag_master_disconnected_failback(self, tag, data): ''' Handle a master_disconnected_failback event ''' # if the master 
disconnect event is for a different master, raise an exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if 
eval_master finds a new master for us, self.connected # will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = True self.io_loop.stop() def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. 
otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) def _handle_tag_salt_auth_creds(self, tag, data): ''' Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def handle_event(self, package): ''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = 
salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. ''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion._handle_tag_grains_refresh
python
def _handle_tag_grains_refresh(self, tag, data): ''' Handle a grains_refresh event ''' if (data.get('force_refresh', False) or self.grains_cache != self.opts['grains']): self.pillar_refresh(force_refresh=True) self.grains_cache = self.opts['grains']
Handle a grains_refresh event
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2438-L2445
null
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, 
self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' 
mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! 
if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) executors = salt.loader.executors(opts, functions, proxy=proxy) if opt_in: self.opts = opts return functions, returners, errors, executors def _send_req_sync(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.ReqChannel.factory(self.opts) try: return channel.send(load, timeout=timeout) finally: channel.close() @tornado.gen.coroutine def _send_req_async(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.AsyncReqChannel.factory(self.opts) try: ret = yield channel.send(load, timeout=timeout) raise tornado.gen.Return(ret) finally: channel.close() def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. ''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return if sync: try: self._send_req_sync(load, timeout) except salt.exceptions.SaltReqTimeoutError: log.info('fire_master failed: master could not be contacted. 
Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return False except Exception: log.info('fire_master failed: %s', traceback.format_exc()) return False else: if timeout_handler is None: def handle_timeout(*_): log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return True timeout_handler = handle_timeout with tornado.stack_context.ExceptionStackContext(timeout_handler): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # Ensure payload is unicode. Disregard failure to decode binary blobs. if six.PY2: data = salt.utils.data.decode(data, keep=True) if 'user' in data: log.info( 'User %s Executing command %s with jid %s', data['user'], data['fun'], data['jid'] ) else: log.info( 'Executing command %s with jid %s', data['fun'], data['jid'] ) log.debug('Command details %s', data) # Don't duplicate jobs log.trace('Started JIDs: %s', self.jid_queue) if self.jid_queue is not None: if data['jid'] in self.jid_queue: return else: self.jid_queue.append(data['jid']) if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']: self.jid_queue.pop(0) if isinstance(data['fun'], six.string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners process_count_max = self.opts.get('process_count_max') process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs') if process_count_max > 0: process_count = len(salt.utils.minion.running(self.opts)) while 
process_count >= process_count_max: log.warning('Maximum number of processes (%s) reached while ' 'executing jid %s, waiting %s seconds...', process_count_max, data['jid'], process_count_max_sleep_secs) yield tornado.gen.sleep(process_count_max_sleep_secs) process_count = len(salt.utils.minion.running(self.opts)) # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. instance = self multiprocessing_enabled = self.opts.get('multiprocessing', True) if multiprocessing_enabled: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None with default_signals(signal.SIGINT, signal.SIGTERM): process = SignalHandlingMultiprocessingProcess( target=self._target, args=(instance, self.opts, data, self.connected) ) else: process = threading.Thread( target=self._target, args=(instance, self.opts, data, self.connected), name=data['jid'] ) if multiprocessing_enabled: with default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers process.start() else: process.start() # TODO: remove the windows specific check? 
if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading 
target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. 
Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
try: func_result = all(return_data.get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret['retcode'] = retcode ret['success'] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = 'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( 'A command in \'%s\' had a problem: %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( 'Problem executing \'%s\': %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing \'{0}\': {1}'.format( function_name, exc ) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format( function_name, exc, func.__doc__ or '' ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string(function_name) ret['return'] = docs else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = 
function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: \'{0}\''.format( minion_instance.function_errors[mod_name] ) ret['success'] = False ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. Ignoring.') if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) # Add default returners from minion config # Should have been coverted to comma-delimited string already if isinstance(opts.get('return'), six.string_types): if data['ret']: data['ret'] = ','.join((data['ret'], opts['return'])) else: data['ret'] = opts['return'] log.debug('minion return: %s', ret) # TODO: make a list? Seems odd to split it this late :/ if data['ret'] and isinstance(data['ret'], six.string_types): if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: returner_str = '{0}.returner'.format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string(returner_str) log.error( 'Returner %s could not be loaded: %s', returner_str, returner_err ) except Exception as exc: log.exception( 'The return failed for job %s: %s', data['jid'], exc ) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. 
''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID %s', sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) multifunc_ordered = opts.get('multifunc_ordered', False) num_funcs = len(data['fun']) if multifunc_ordered: ret = { 'return': [None] * num_funcs, 'retcode': [None] * num_funcs, 'success': [False] * num_funcs } else: ret = { 'return': {}, 'retcode': {}, 'success': {} } for ind in range(0, num_funcs): if not multifunc_ordered: ret['success'][data['fun'][ind]] = False try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True elif minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. 
Only ' 'saltutil.refresh_pillar allowed in blackout mode.') func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) minion_instance.functions.pack['__context__']['retcode'] = 0 key = ind if multifunc_ordered else data['fun'][ind] ret['return'][key] = func(*args, **kwargs) retcode = minion_instance.functions.pack['__context__'].get( 'retcode', 0 ) if retcode == 0: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. try: func_result = all(ret['return'][key].get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = 1 ret['retcode'][key] = retcode ret['success'][key] = retcode == 0 except Exception as exc: trb = traceback.format_exc() log.warning('The minion function caused an exception: %s', exc) if multifunc_ordered: ret['return'][ind] = trb else: ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job %s: %s', data['jid'], exc ) def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file 
is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled if ret['jid'] == 'req': ret['jid'] = salt.utils.jid.gen_jid(self.opts) salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) if not self.opts['pub_ret']: return '' def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. 
If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' if not isinstance(rets, list): rets = [rets] jids = {} for ret in rets: jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) load = jids.setdefault(jid, {}) if ret_cmd == '_syndic_return': if not load: load.update({'id': self.opts['id'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__'), 'return': {}}) if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load.update({'id': self.opts['id']}) for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. 
This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled salt.utils.minion.cache_jobs(self.opts, load['jid'], ret) load = {'cmd': ret_cmd, 'load': list(six.itervalues(jids))} def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' 
) else: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data) def _refresh_grains_watcher(self, refresh_interval_in_minutes): ''' Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: :return: None ''' if '__update_grains' not in self.opts.get('schedule', {}): if 'schedule' not in self.opts: self.opts['schedule'] = {} self.opts['schedule'].update({ '__update_grains': { 'function': 'event.fire', 'args': [{}, 'grains_refresh'], 'minutes': refresh_interval_in_minutes } }) def _fire_master_minion_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to False in Sodium release. self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'minion_start' ) # send name spaced event self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'minion'), ) def module_refresh(self, force_refresh=False, notify=False): ''' Refresh the functions and returners. ''' log.debug('Refreshing modules. Notify=%s', notify) self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify) self.schedule.functions = self.functions self.schedule.returners = self.returners def beacons_refresh(self): ''' Refresh the functions and returners. 
''' log.debug('Refreshing beacons.') self.beacons = salt.beacons.Beacon(self.opts, self.functions) def matchers_refresh(self): ''' Refresh the matchers ''' log.debug('Refreshing matchers.') self.matchers = salt.loader.matchers(self.opts) # TODO: only allow one future in flight at a time? @tornado.gen.coroutine def pillar_refresh(self, force_refresh=False, notify=False): ''' Refresh the pillar ''' if self.connected: log.debug('Refreshing pillar. Notify: %s', notify) async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ) try: self.opts['pillar'] = yield async_pillar.compile_pillar() if notify: evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE) except SaltClientError: # Do not exit if a pillar refresh fails. log.error('Pillar data could not be refreshed. ' 'One or more masters may be down!') finally: async_pillar.destroy() self.module_refresh(force_refresh, notify) self.matchers_refresh() self.beacons_refresh() def manage_schedule(self, tag, data): ''' Refresh the functions and returners. 
''' func = data.get('func', None) name = data.get('name', None) schedule = data.get('schedule', None) where = data.get('where', None) persist = data.get('persist', None) funcs = {'delete': ('delete_job', (name, persist)), 'add': ('add_job', (schedule, persist)), 'modify': ('modify_job', (name, schedule, persist)), 'enable': ('enable_schedule', ()), 'disable': ('disable_schedule', ()), 'enable_job': ('enable_job', (name, persist)), 'disable_job': ('disable_job', (name, persist)), 'postpone_job': ('postpone_job', (name, data)), 'skip_job': ('skip_job', (name, data)), 'reload': ('reload', (schedule,)), 'list': ('list', (where,)), 'save_schedule': ('save_schedule', ()), 'get_next_fire_time': ('get_next_fire_time', (name,))} # Call the appropriate schedule function try: alias, params = funcs.get(func) getattr(self.schedule, alias)(*params) except TypeError: log.error('Function "%s" is unavailable in salt.utils.scheduler', func) def manage_beacons(self, tag, data): ''' Manage Beacons ''' func = data.get('func', None) name = data.get('name', None) beacon_data = data.get('beacon_data', None) include_pillar = data.get('include_pillar', None) include_opts = data.get('include_opts', None) funcs = {'add': ('add_beacon', (name, beacon_data)), 'modify': ('modify_beacon', (name, beacon_data)), 'delete': ('delete_beacon', (name,)), 'enable': ('enable_beacons', ()), 'disable': ('disable_beacons', ()), 'enable_beacon': ('enable_beacon', (name,)), 'disable_beacon': ('disable_beacon', (name,)), 'list': ('list_beacons', (include_opts, include_pillar)), 'list_available': ('list_available_beacons', ()), 'validate_beacon': ('validate_beacon', (name, beacon_data)), 'reset': ('reset', ())} # Call the appropriate beacon function try: alias, params = funcs.get(func) getattr(self.beacons, alias)(*params) except AttributeError: log.error('Function "%s" is unavailable in salt.beacons', func) except TypeError as exc: log.info( 'Failed to handle %s with data(%s). 
Error: %s', tag, data, exc, exc_info_on_loglevel=logging.DEBUG ) def environ_setenv(self, tag, data): ''' Set the salt-minion main process environment according to the data contained in the minion event data ''' environ = data.get('environ', None) if environ is None: return False false_unsets = data.get('false_unsets', False) clear_all = data.get('clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all) def _pre_tune(self): ''' Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running ''' if self._running is None: self._running = True elif self._running is False: log.error( 'This %s was scheduled to stop. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return elif self._running is True: log.error( 'This %s is already running. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return try: log.info( '%s is starting as user \'%s\'', self.__class__.__name__, salt.utils.user.get_user() ) except Exception as err: # Only windows is allowed to fail here. See #3189. Log as debug in # that case. Else, error. 
log.log( salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR, 'Failed to get the user who is starting %s', self.__class__.__name__, exc_info=err ) def _mine_send(self, tag, data): ''' Send mine data to the master ''' channel = salt.transport.client.ReqChannel.factory(self.opts) data['tok'] = self.tok try: ret = channel.send(data) return ret except SaltReqTimeoutError: log.warning('Unable to send mine data to master.') return None finally: channel.close() def _handle_tag_module_refresh(self, tag, data): ''' Handle a module_refresh event ''' self.module_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) @tornado.gen.coroutine def _handle_tag_pillar_refresh(self, tag, data): ''' Handle a pillar_refresh event ''' yield self.pillar_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) def _handle_tag_beacons_refresh(self, tag, data): ''' Handle a beacon_refresh event ''' self.beacons_refresh() def _handle_tag_matchers_refresh(self, tag, data): ''' Handle a matchers_refresh event ''' self.matchers_refresh() def _handle_tag_manage_schedule(self, tag, data): ''' Handle a manage_schedule event ''' self.manage_schedule(tag, data) def _handle_tag_manage_beacons(self, tag, data): ''' Handle a manage_beacons event ''' self.manage_beacons(tag, data) def _handle_tag_environ_setenv(self, tag, data): ''' Handle a environ_setenv event ''' self.environ_setenv(tag, data) def _handle_tag_minion_mine(self, tag, data): ''' Handle a _minion_mine event ''' self._mine_send(tag, data) def _handle_tag_fire_master(self, tag, data): ''' Handle a fire_master event ''' if self.connected: log.debug('Forwarding master event tag=%s', data['tag']) self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) def _handle_tag_master_disconnected_failback(self, tag, data): ''' Handle a master_disconnected_failback event ''' # if the master disconnect event is for a different master, raise an 
exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # 
will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = True self.io_loop.stop() def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. 
otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) def _handle_tag_salt_auth_creds(self, tag, data): ''' Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def handle_event(self, package): ''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = 
salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. ''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion._handle_tag_fire_master
python
def _handle_tag_fire_master(self, tag, data): ''' Handle a fire_master event ''' if self.connected: log.debug('Forwarding master event tag=%s', data['tag']) self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
Handle a fire_master event
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2459-L2465
null
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, 
self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' 
mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! 
if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) executors = salt.loader.executors(opts, functions, proxy=proxy) if opt_in: self.opts = opts return functions, returners, errors, executors def _send_req_sync(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.ReqChannel.factory(self.opts) try: return channel.send(load, timeout=timeout) finally: channel.close() @tornado.gen.coroutine def _send_req_async(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.AsyncReqChannel.factory(self.opts) try: ret = yield channel.send(load, timeout=timeout) raise tornado.gen.Return(ret) finally: channel.close() def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. ''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return if sync: try: self._send_req_sync(load, timeout) except salt.exceptions.SaltReqTimeoutError: log.info('fire_master failed: master could not be contacted. 
Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return False except Exception: log.info('fire_master failed: %s', traceback.format_exc()) return False else: if timeout_handler is None: def handle_timeout(*_): log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return True timeout_handler = handle_timeout with tornado.stack_context.ExceptionStackContext(timeout_handler): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # Ensure payload is unicode. Disregard failure to decode binary blobs. if six.PY2: data = salt.utils.data.decode(data, keep=True) if 'user' in data: log.info( 'User %s Executing command %s with jid %s', data['user'], data['fun'], data['jid'] ) else: log.info( 'Executing command %s with jid %s', data['fun'], data['jid'] ) log.debug('Command details %s', data) # Don't duplicate jobs log.trace('Started JIDs: %s', self.jid_queue) if self.jid_queue is not None: if data['jid'] in self.jid_queue: return else: self.jid_queue.append(data['jid']) if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']: self.jid_queue.pop(0) if isinstance(data['fun'], six.string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners process_count_max = self.opts.get('process_count_max') process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs') if process_count_max > 0: process_count = len(salt.utils.minion.running(self.opts)) while 
process_count >= process_count_max: log.warning('Maximum number of processes (%s) reached while ' 'executing jid %s, waiting %s seconds...', process_count_max, data['jid'], process_count_max_sleep_secs) yield tornado.gen.sleep(process_count_max_sleep_secs) process_count = len(salt.utils.minion.running(self.opts)) # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. instance = self multiprocessing_enabled = self.opts.get('multiprocessing', True) if multiprocessing_enabled: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None with default_signals(signal.SIGINT, signal.SIGTERM): process = SignalHandlingMultiprocessingProcess( target=self._target, args=(instance, self.opts, data, self.connected) ) else: process = threading.Thread( target=self._target, args=(instance, self.opts, data, self.connected), name=data['jid'] ) if multiprocessing_enabled: with default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers process.start() else: process.start() # TODO: remove the windows specific check? 
if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading 
target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. 
Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
try: func_result = all(return_data.get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret['retcode'] = retcode ret['success'] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = 'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( 'A command in \'%s\' had a problem: %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( 'Problem executing \'%s\': %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing \'{0}\': {1}'.format( function_name, exc ) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format( function_name, exc, func.__doc__ or '' ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string(function_name) ret['return'] = docs else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = 
function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: \'{0}\''.format( minion_instance.function_errors[mod_name] ) ret['success'] = False ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. Ignoring.') if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) # Add default returners from minion config # Should have been coverted to comma-delimited string already if isinstance(opts.get('return'), six.string_types): if data['ret']: data['ret'] = ','.join((data['ret'], opts['return'])) else: data['ret'] = opts['return'] log.debug('minion return: %s', ret) # TODO: make a list? Seems odd to split it this late :/ if data['ret'] and isinstance(data['ret'], six.string_types): if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: returner_str = '{0}.returner'.format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string(returner_str) log.error( 'Returner %s could not be loaded: %s', returner_str, returner_err ) except Exception as exc: log.exception( 'The return failed for job %s: %s', data['jid'], exc ) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. 
''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID %s', sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) multifunc_ordered = opts.get('multifunc_ordered', False) num_funcs = len(data['fun']) if multifunc_ordered: ret = { 'return': [None] * num_funcs, 'retcode': [None] * num_funcs, 'success': [False] * num_funcs } else: ret = { 'return': {}, 'retcode': {}, 'success': {} } for ind in range(0, num_funcs): if not multifunc_ordered: ret['success'][data['fun'][ind]] = False try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True elif minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. 
Only ' 'saltutil.refresh_pillar allowed in blackout mode.') func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) minion_instance.functions.pack['__context__']['retcode'] = 0 key = ind if multifunc_ordered else data['fun'][ind] ret['return'][key] = func(*args, **kwargs) retcode = minion_instance.functions.pack['__context__'].get( 'retcode', 0 ) if retcode == 0: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. try: func_result = all(ret['return'][key].get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = 1 ret['retcode'][key] = retcode ret['success'][key] = retcode == 0 except Exception as exc: trb = traceback.format_exc() log.warning('The minion function caused an exception: %s', exc) if multifunc_ordered: ret['return'][ind] = trb else: ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job %s: %s', data['jid'], exc ) def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file 
is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled if ret['jid'] == 'req': ret['jid'] = salt.utils.jid.gen_jid(self.opts) salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) if not self.opts['pub_ret']: return '' def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. 
If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' if not isinstance(rets, list): rets = [rets] jids = {} for ret in rets: jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) load = jids.setdefault(jid, {}) if ret_cmd == '_syndic_return': if not load: load.update({'id': self.opts['id'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__'), 'return': {}}) if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load.update({'id': self.opts['id']}) for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. 
This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled salt.utils.minion.cache_jobs(self.opts, load['jid'], ret) load = {'cmd': ret_cmd, 'load': list(six.itervalues(jids))} def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' 
) else: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data) def _refresh_grains_watcher(self, refresh_interval_in_minutes): ''' Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: :return: None ''' if '__update_grains' not in self.opts.get('schedule', {}): if 'schedule' not in self.opts: self.opts['schedule'] = {} self.opts['schedule'].update({ '__update_grains': { 'function': 'event.fire', 'args': [{}, 'grains_refresh'], 'minutes': refresh_interval_in_minutes } }) def _fire_master_minion_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to False in Sodium release. self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'minion_start' ) # send name spaced event self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'minion'), ) def module_refresh(self, force_refresh=False, notify=False): ''' Refresh the functions and returners. ''' log.debug('Refreshing modules. Notify=%s', notify) self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify) self.schedule.functions = self.functions self.schedule.returners = self.returners def beacons_refresh(self): ''' Refresh the functions and returners. 
''' log.debug('Refreshing beacons.') self.beacons = salt.beacons.Beacon(self.opts, self.functions) def matchers_refresh(self): ''' Refresh the matchers ''' log.debug('Refreshing matchers.') self.matchers = salt.loader.matchers(self.opts) # TODO: only allow one future in flight at a time? @tornado.gen.coroutine def pillar_refresh(self, force_refresh=False, notify=False): ''' Refresh the pillar ''' if self.connected: log.debug('Refreshing pillar. Notify: %s', notify) async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ) try: self.opts['pillar'] = yield async_pillar.compile_pillar() if notify: evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE) except SaltClientError: # Do not exit if a pillar refresh fails. log.error('Pillar data could not be refreshed. ' 'One or more masters may be down!') finally: async_pillar.destroy() self.module_refresh(force_refresh, notify) self.matchers_refresh() self.beacons_refresh() def manage_schedule(self, tag, data): ''' Refresh the functions and returners. 
''' func = data.get('func', None) name = data.get('name', None) schedule = data.get('schedule', None) where = data.get('where', None) persist = data.get('persist', None) funcs = {'delete': ('delete_job', (name, persist)), 'add': ('add_job', (schedule, persist)), 'modify': ('modify_job', (name, schedule, persist)), 'enable': ('enable_schedule', ()), 'disable': ('disable_schedule', ()), 'enable_job': ('enable_job', (name, persist)), 'disable_job': ('disable_job', (name, persist)), 'postpone_job': ('postpone_job', (name, data)), 'skip_job': ('skip_job', (name, data)), 'reload': ('reload', (schedule,)), 'list': ('list', (where,)), 'save_schedule': ('save_schedule', ()), 'get_next_fire_time': ('get_next_fire_time', (name,))} # Call the appropriate schedule function try: alias, params = funcs.get(func) getattr(self.schedule, alias)(*params) except TypeError: log.error('Function "%s" is unavailable in salt.utils.scheduler', func) def manage_beacons(self, tag, data): ''' Manage Beacons ''' func = data.get('func', None) name = data.get('name', None) beacon_data = data.get('beacon_data', None) include_pillar = data.get('include_pillar', None) include_opts = data.get('include_opts', None) funcs = {'add': ('add_beacon', (name, beacon_data)), 'modify': ('modify_beacon', (name, beacon_data)), 'delete': ('delete_beacon', (name,)), 'enable': ('enable_beacons', ()), 'disable': ('disable_beacons', ()), 'enable_beacon': ('enable_beacon', (name,)), 'disable_beacon': ('disable_beacon', (name,)), 'list': ('list_beacons', (include_opts, include_pillar)), 'list_available': ('list_available_beacons', ()), 'validate_beacon': ('validate_beacon', (name, beacon_data)), 'reset': ('reset', ())} # Call the appropriate beacon function try: alias, params = funcs.get(func) getattr(self.beacons, alias)(*params) except AttributeError: log.error('Function "%s" is unavailable in salt.beacons', func) except TypeError as exc: log.info( 'Failed to handle %s with data(%s). 
Error: %s', tag, data, exc, exc_info_on_loglevel=logging.DEBUG ) def environ_setenv(self, tag, data): ''' Set the salt-minion main process environment according to the data contained in the minion event data ''' environ = data.get('environ', None) if environ is None: return False false_unsets = data.get('false_unsets', False) clear_all = data.get('clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all) def _pre_tune(self): ''' Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running ''' if self._running is None: self._running = True elif self._running is False: log.error( 'This %s was scheduled to stop. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return elif self._running is True: log.error( 'This %s is already running. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return try: log.info( '%s is starting as user \'%s\'', self.__class__.__name__, salt.utils.user.get_user() ) except Exception as err: # Only windows is allowed to fail here. See #3189. Log as debug in # that case. Else, error. 
log.log( salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR, 'Failed to get the user who is starting %s', self.__class__.__name__, exc_info=err ) def _mine_send(self, tag, data): ''' Send mine data to the master ''' channel = salt.transport.client.ReqChannel.factory(self.opts) data['tok'] = self.tok try: ret = channel.send(data) return ret except SaltReqTimeoutError: log.warning('Unable to send mine data to master.') return None finally: channel.close() def _handle_tag_module_refresh(self, tag, data): ''' Handle a module_refresh event ''' self.module_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) @tornado.gen.coroutine def _handle_tag_pillar_refresh(self, tag, data): ''' Handle a pillar_refresh event ''' yield self.pillar_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) def _handle_tag_beacons_refresh(self, tag, data): ''' Handle a beacon_refresh event ''' self.beacons_refresh() def _handle_tag_matchers_refresh(self, tag, data): ''' Handle a matchers_refresh event ''' self.matchers_refresh() def _handle_tag_manage_schedule(self, tag, data): ''' Handle a manage_schedule event ''' self.manage_schedule(tag, data) def _handle_tag_manage_beacons(self, tag, data): ''' Handle a manage_beacons event ''' self.manage_beacons(tag, data) def _handle_tag_grains_refresh(self, tag, data): ''' Handle a grains_refresh event ''' if (data.get('force_refresh', False) or self.grains_cache != self.opts['grains']): self.pillar_refresh(force_refresh=True) self.grains_cache = self.opts['grains'] def _handle_tag_environ_setenv(self, tag, data): ''' Handle a environ_setenv event ''' self.environ_setenv(tag, data) def _handle_tag_minion_mine(self, tag, data): ''' Handle a _minion_mine event ''' self._mine_send(tag, data) def _handle_tag_master_disconnected_failback(self, tag, data): ''' Handle a master_disconnected_failback event ''' # if the master disconnect event is for a different 
master, raise an exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, 
self.connected # will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = True self.io_loop.stop() def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. 
otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) def _handle_tag_salt_auth_creds(self, tag, data): ''' Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def handle_event(self, package): ''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = 
salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. ''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion._handle_tag_master_disconnected_failback
python
def _handle_tag_master_disconnected_failback(self, tag, data): ''' Handle a master_disconnected_failback event ''' # if the master disconnect event is for a different master, raise an exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 
'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = True self.io_loop.stop()
Handle a master_disconnected_failback event
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2467-L2581
null
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, 
self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' 
mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! 
if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) executors = salt.loader.executors(opts, functions, proxy=proxy) if opt_in: self.opts = opts return functions, returners, errors, executors def _send_req_sync(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.ReqChannel.factory(self.opts) try: return channel.send(load, timeout=timeout) finally: channel.close() @tornado.gen.coroutine def _send_req_async(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.AsyncReqChannel.factory(self.opts) try: ret = yield channel.send(load, timeout=timeout) raise tornado.gen.Return(ret) finally: channel.close() def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. ''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return if sync: try: self._send_req_sync(load, timeout) except salt.exceptions.SaltReqTimeoutError: log.info('fire_master failed: master could not be contacted. 
    @tornado.gen.coroutine
    def _handle_decoded_payload(self, data):
        '''
        Override this method if you wish to handle the decoded data
        differently.

        Dispatches a decoded job payload to a worker process (or thread),
        after de-duplicating JIDs and throttling on ``process_count_max``.
        '''
        # Ensure payload is unicode. Disregard failure to decode binary blobs.
        if six.PY2:
            data = salt.utils.data.decode(data, keep=True)
        if 'user' in data:
            log.info(
                'User %s Executing command %s with jid %s',
                data['user'], data['fun'], data['jid']
            )
        else:
            log.info(
                'Executing command %s with jid %s',
                data['fun'], data['jid']
            )
        log.debug('Command details %s', data)

        # Don't duplicate jobs: jid_queue acts as a bounded LRU of seen JIDs
        log.trace('Started JIDs: %s', self.jid_queue)
        if self.jid_queue is not None:
            if data['jid'] in self.jid_queue:
                return
            else:
                self.jid_queue.append(data['jid'])
                if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']:
                    self.jid_queue.pop(0)

        if isinstance(data['fun'], six.string_types):
            if data['fun'] == 'sys.reload_modules':
                # Reload in-process so the scheduler sees the fresh modules too
                self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
                self.schedule.functions = self.functions
                self.schedule.returners = self.returners

        process_count_max = self.opts.get('process_count_max')
        process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs')
        if process_count_max > 0:
            # Throttle: wait until the number of running jobs drops below the cap
            process_count = len(salt.utils.minion.running(self.opts))
            while process_count >= process_count_max:
                log.warning('Maximum number of processes (%s) reached while '
                            'executing jid %s, waiting %s seconds...',
                            process_count_max, data['jid'], process_count_max_sleep_secs)
                yield tornado.gen.sleep(process_count_max_sleep_secs)
                process_count = len(salt.utils.minion.running(self.opts))

        # We stash an instance references to allow for the socket
        # communication in Windows. You can't pickle functions, and thus
        # python needs to be able to reconstruct the reference on the other
        # side.
        instance = self
        multiprocessing_enabled = self.opts.get('multiprocessing', True)
        if multiprocessing_enabled:
            if sys.platform.startswith('win'):
                # let python reconstruct the minion on the other side if we're
                # running on windows
                instance = None
            with default_signals(signal.SIGINT, signal.SIGTERM):
                process = SignalHandlingMultiprocessingProcess(
                    target=self._target, args=(instance, self.opts, data, self.connected)
                )
        else:
            process = threading.Thread(
                target=self._target,
                args=(instance, self.opts, data, self.connected),
                name=data['jid']
            )

        if multiprocessing_enabled:
            with default_signals(signal.SIGINT, signal.SIGTERM):
                # Reset current signals before starting the process in
                # order not to inherit the current signal handlers
                process.start()
        else:
            process.start()

        # TODO: remove the windows specific check?
        if multiprocessing_enabled and not salt.utils.platform.is_windows():
            # we only want to join() immediately if we are daemonizing a process
            process.join()
        elif salt.utils.platform.is_windows():
            self.win_proc.append(process)

    def ctx(self):
        '''
        Return a single context manager for the minion's data

        On PY2 this nests the three loader context dicts; on PY3 an
        ``ExitStack`` is used to the same effect.
        '''
        if six.PY2:
            return contextlib.nested(
                self.functions.context_dict.clone(),
                self.returners.context_dict.clone(),
                self.executors.context_dict.clone(),
            )
        else:
            exitstack = contextlib.ExitStack()
            exitstack.enter_context(self.functions.context_dict.clone())
            exitstack.enter_context(self.returners.context_dict.clone())
            exitstack.enter_context(self.executors.context_dict.clone())
            return exitstack

    @classmethod
    def _target(cls, minion_instance, opts, data, connected):
        '''
        Entry point for the job worker process/thread.

        ``minion_instance`` is None on Windows (a fresh minion is rebuilt
        here because instances cannot be pickled across the process
        boundary); otherwise the live minion object is reused.
        '''
        if not minion_instance:
            minion_instance = cls(opts)
            minion_instance.connected = connected
            if not hasattr(minion_instance, 'functions'):
                functions, returners, function_errors, executors = (
                    minion_instance._load_modules(grains=opts['grains'])
                )
                minion_instance.functions = functions
                minion_instance.returners = returners
                minion_instance.function_errors = function_errors
                minion_instance.executors = executors
            if not hasattr(minion_instance, 'serial'):
                minion_instance.serial = salt.payload.Serial(opts)
            if not hasattr(minion_instance, 'proc_dir'):
                uid = salt.utils.user.get_uid(user=opts.get('user', None))
                minion_instance.proc_dir = (
                    get_proc_dir(opts['cachedir'], uid=uid)
                )

        def run_func(minion_instance, opts, data):
            # A tuple/list of functions means a compound (multi-func) job
            if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
                return Minion._thread_multi_return(minion_instance, opts, data)
            else:
                return Minion._thread_return(minion_instance, opts, data)

        with tornado.stack_context.StackContext(functools.partial(RequestContext,
                                                                  {'data': data, 'opts': opts})):
            with tornado.stack_context.StackContext(minion_instance.ctx):
                run_func(minion_instance, opts, data)
target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. 
Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
try: func_result = all(return_data.get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret['retcode'] = retcode ret['success'] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = 'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( 'A command in \'%s\' had a problem: %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( 'Problem executing \'%s\': %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing \'{0}\': {1}'.format( function_name, exc ) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format( function_name, exc, func.__doc__ or '' ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string(function_name) ret['return'] = docs else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = 
function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: \'{0}\''.format( minion_instance.function_errors[mod_name] ) ret['success'] = False ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. Ignoring.') if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) # Add default returners from minion config # Should have been coverted to comma-delimited string already if isinstance(opts.get('return'), six.string_types): if data['ret']: data['ret'] = ','.join((data['ret'], opts['return'])) else: data['ret'] = opts['return'] log.debug('minion return: %s', ret) # TODO: make a list? Seems odd to split it this late :/ if data['ret'] and isinstance(data['ret'], six.string_types): if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: returner_str = '{0}.returner'.format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string(returner_str) log.error( 'Returner %s could not be loaded: %s', returner_str, returner_err ) except Exception as exc: log.exception( 'The return failed for job %s: %s', data['jid'], exc ) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. 
    @classmethod
    def _thread_multi_return(cls, minion_instance, opts, data):
        '''
        This method should be used as a threading target, start the actual
        minion side execution.

        Compound-job variant of ``_thread_return``: ``data['fun']`` and
        ``data['arg']`` are parallel lists, executed one by one. Results are
        keyed by position when ``multifunc_ordered`` is set, otherwise by
        function name.
        '''
        fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
        if opts['multiprocessing'] and not salt.utils.platform.is_windows():
            # Shutdown the multiprocessing before daemonizing
            salt.log.setup.shutdown_multiprocessing_logging()

            salt.utils.process.daemonize_if(opts)

            # Reconfigure multiprocessing logging after daemonizing
            salt.log.setup.setup_multiprocessing_logging()

        salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))

        sdata = {'pid': os.getpid()}
        sdata.update(data)
        log.info('Starting a new job with PID %s', sdata['pid'])
        with salt.utils.files.fopen(fn_, 'w+b') as fp_:
            fp_.write(minion_instance.serial.dumps(sdata))

        multifunc_ordered = opts.get('multifunc_ordered', False)
        num_funcs = len(data['fun'])
        if multifunc_ordered:
            ret = {
                'return': [None] * num_funcs,
                'retcode': [None] * num_funcs,
                'success': [False] * num_funcs
            }
        else:
            ret = {
                'return': {},
                'retcode': {},
                'success': {}
            }

        for ind in range(0, num_funcs):
            if not multifunc_ordered:
                ret['success'][data['fun'][ind]] = False
            try:
                minion_blackout_violation = False
                if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
                    whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
                    # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                    if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
                        minion_blackout_violation = True
                elif minion_instance.opts['grains'].get('minion_blackout', False):
                    whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
                    if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
                        minion_blackout_violation = True
                if minion_blackout_violation:
                    raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                              'to False in pillar or grains to resume operations. Only '
                                              'saltutil.refresh_pillar allowed in blackout mode.')

                func = minion_instance.functions[data['fun'][ind]]

                args, kwargs = load_args_and_kwargs(
                    func,
                    data['arg'][ind],
                    data)
                minion_instance.functions.pack['__context__']['retcode'] = 0
                key = ind if multifunc_ordered else data['fun'][ind]
                ret['return'][key] = func(*args, **kwargs)
                retcode = minion_instance.functions.pack['__context__'].get(
                    'retcode',
                    0
                )
                if retcode == 0:
                    # No nonzero retcode in __context__ dunder. Check if return
                    # is a dictionary with a "result" or "success" key.
                    try:
                        func_result = all(ret['return'][key].get(x, True)
                                          for x in ('result', 'success'))
                    except Exception:
                        # return data is not a dict
                        func_result = True
                    if not func_result:
                        retcode = 1

                ret['retcode'][key] = retcode
                ret['success'][key] = retcode == 0
            except Exception as exc:
                trb = traceback.format_exc()
                log.warning('The minion function caused an exception: %s', exc)
                if multifunc_ordered:
                    ret['return'][ind] = trb
                else:
                    ret['return'][data['fun'][ind]] = trb
        ret['jid'] = data['jid']
        ret['fun'] = data['fun']
        ret['fun_args'] = data['arg']
        if 'metadata' in data:
            ret['metadata'] = data['metadata']
        if minion_instance.connected:
            minion_instance._return_pub(
                ret,
                timeout=minion_instance._return_retry_timer()
            )
        if data['ret']:
            if 'ret_config' in data:
                ret['ret_config'] = data['ret_config']
            if 'ret_kwargs' in data:
                ret['ret_kwargs'] = data['ret_kwargs']
            for returner in set(data['ret'].split(',')):
                ret['id'] = opts['id']
                try:
                    minion_instance.returners['{0}.returner'.format(
                        returner
                    )](ret)
                except Exception as exc:
                    log.error(
                        'The return failed for job %s: %s',
                        data['jid'], exc
                    )
is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled if ret['jid'] == 'req': ret['jid'] = salt.utils.jid.gen_jid(self.opts) salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) if not self.opts['pub_ret']: return '' def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. 
If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' if not isinstance(rets, list): rets = [rets] jids = {} for ret in rets: jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) load = jids.setdefault(jid, {}) if ret_cmd == '_syndic_return': if not load: load.update({'id': self.opts['id'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__'), 'return': {}}) if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load.update({'id': self.opts['id']}) for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. 
This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled salt.utils.minion.cache_jobs(self.opts, load['jid'], ret) load = {'cmd': ret_cmd, 'load': list(six.itervalues(jids))} def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' 
) else: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data) def _refresh_grains_watcher(self, refresh_interval_in_minutes): ''' Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: :return: None ''' if '__update_grains' not in self.opts.get('schedule', {}): if 'schedule' not in self.opts: self.opts['schedule'] = {} self.opts['schedule'].update({ '__update_grains': { 'function': 'event.fire', 'args': [{}, 'grains_refresh'], 'minutes': refresh_interval_in_minutes } }) def _fire_master_minion_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to False in Sodium release. self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'minion_start' ) # send name spaced event self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'minion'), ) def module_refresh(self, force_refresh=False, notify=False): ''' Refresh the functions and returners. ''' log.debug('Refreshing modules. Notify=%s', notify) self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify) self.schedule.functions = self.functions self.schedule.returners = self.returners def beacons_refresh(self): ''' Refresh the functions and returners. 
''' log.debug('Refreshing beacons.') self.beacons = salt.beacons.Beacon(self.opts, self.functions) def matchers_refresh(self): ''' Refresh the matchers ''' log.debug('Refreshing matchers.') self.matchers = salt.loader.matchers(self.opts) # TODO: only allow one future in flight at a time? @tornado.gen.coroutine def pillar_refresh(self, force_refresh=False, notify=False): ''' Refresh the pillar ''' if self.connected: log.debug('Refreshing pillar. Notify: %s', notify) async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ) try: self.opts['pillar'] = yield async_pillar.compile_pillar() if notify: evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE) except SaltClientError: # Do not exit if a pillar refresh fails. log.error('Pillar data could not be refreshed. ' 'One or more masters may be down!') finally: async_pillar.destroy() self.module_refresh(force_refresh, notify) self.matchers_refresh() self.beacons_refresh() def manage_schedule(self, tag, data): ''' Refresh the functions and returners. 
''' func = data.get('func', None) name = data.get('name', None) schedule = data.get('schedule', None) where = data.get('where', None) persist = data.get('persist', None) funcs = {'delete': ('delete_job', (name, persist)), 'add': ('add_job', (schedule, persist)), 'modify': ('modify_job', (name, schedule, persist)), 'enable': ('enable_schedule', ()), 'disable': ('disable_schedule', ()), 'enable_job': ('enable_job', (name, persist)), 'disable_job': ('disable_job', (name, persist)), 'postpone_job': ('postpone_job', (name, data)), 'skip_job': ('skip_job', (name, data)), 'reload': ('reload', (schedule,)), 'list': ('list', (where,)), 'save_schedule': ('save_schedule', ()), 'get_next_fire_time': ('get_next_fire_time', (name,))} # Call the appropriate schedule function try: alias, params = funcs.get(func) getattr(self.schedule, alias)(*params) except TypeError: log.error('Function "%s" is unavailable in salt.utils.scheduler', func) def manage_beacons(self, tag, data): ''' Manage Beacons ''' func = data.get('func', None) name = data.get('name', None) beacon_data = data.get('beacon_data', None) include_pillar = data.get('include_pillar', None) include_opts = data.get('include_opts', None) funcs = {'add': ('add_beacon', (name, beacon_data)), 'modify': ('modify_beacon', (name, beacon_data)), 'delete': ('delete_beacon', (name,)), 'enable': ('enable_beacons', ()), 'disable': ('disable_beacons', ()), 'enable_beacon': ('enable_beacon', (name,)), 'disable_beacon': ('disable_beacon', (name,)), 'list': ('list_beacons', (include_opts, include_pillar)), 'list_available': ('list_available_beacons', ()), 'validate_beacon': ('validate_beacon', (name, beacon_data)), 'reset': ('reset', ())} # Call the appropriate beacon function try: alias, params = funcs.get(func) getattr(self.beacons, alias)(*params) except AttributeError: log.error('Function "%s" is unavailable in salt.beacons', func) except TypeError as exc: log.info( 'Failed to handle %s with data(%s). 
    def environ_setenv(self, tag, data):
        '''
        Set the salt-minion main process environment according to
        the data contained in the minion event data
        '''
        environ = data.get('environ', None)
        if environ is None:
            return False
        false_unsets = data.get('false_unsets', False)
        clear_all = data.get('clear_all', False)
        # Local import: only needed when this event actually fires
        import salt.modules.environ as mod_environ
        return mod_environ.setenv(environ, false_unsets, clear_all)

    def _pre_tune(self):
        '''
        Set the minion running flag and issue the appropriate warnings if
        the minion cannot be started or is already running
        '''
        if self._running is None:
            self._running = True
        elif self._running is False:
            log.error(
                'This %s was scheduled to stop. Not running %s.tune_in()',
                self.__class__.__name__, self.__class__.__name__
            )
            return
        elif self._running is True:
            log.error(
                'This %s is already running. Not running %s.tune_in()',
                self.__class__.__name__, self.__class__.__name__
            )
            return

        try:
            log.info(
                '%s is starting as user \'%s\'',
                self.__class__.__name__, salt.utils.user.get_user()
            )
        except Exception as err:
            # Only windows is allowed to fail here. See #3189. Log as debug in
            # that case. Else, error.
            log.log(
                salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR,
                'Failed to get the user who is starting %s',
                self.__class__.__name__,
                exc_info=err
            )

    def _mine_send(self, tag, data):
        '''
        Send mine data to the master
        '''
        channel = salt.transport.client.ReqChannel.factory(self.opts)
        data['tok'] = self.tok
        try:
            ret = channel.send(data)
            return ret
        except SaltReqTimeoutError:
            log.warning('Unable to send mine data to master.')
            return None
        finally:
            # Always release the request channel
            channel.close()

    def _handle_tag_module_refresh(self, tag, data):
        '''
        Handle a module_refresh event
        '''
        self.module_refresh(
            force_refresh=data.get('force_refresh', False),
            notify=data.get('notify', False)
        )

    @tornado.gen.coroutine
    def _handle_tag_pillar_refresh(self, tag, data):
        '''
        Handle a pillar_refresh event
        '''
        yield self.pillar_refresh(
            force_refresh=data.get('force_refresh', False),
            notify=data.get('notify', False)
        )

    def _handle_tag_beacons_refresh(self, tag, data):
        '''
        Handle a beacon_refresh event
        '''
        self.beacons_refresh()

    def _handle_tag_matchers_refresh(self, tag, data):
        '''
        Handle a matchers_refresh event
        '''
        self.matchers_refresh()

    def _handle_tag_manage_schedule(self, tag, data):
        '''
        Handle a manage_schedule event
        '''
        self.manage_schedule(tag, data)

    def _handle_tag_manage_beacons(self, tag, data):
        '''
        Handle a manage_beacons event
        '''
        self.manage_beacons(tag, data)

    def _handle_tag_grains_refresh(self, tag, data):
        '''
        Handle a grains_refresh event
        '''
        if (data.get('force_refresh', False) or
                self.grains_cache != self.opts['grains']):
            # NOTE(review): pillar_refresh is a coroutine and its future is
            # not yielded here — confirm fire-and-forget is intended.
            self.pillar_refresh(force_refresh=True)
            self.grains_cache = self.opts['grains']

    def _handle_tag_environ_setenv(self, tag, data):
        '''
        Handle a environ_setenv event
        '''
        self.environ_setenv(tag, data)

    def _handle_tag_minion_mine(self, tag, data):
        '''
        Handle a _minion_mine event
        '''
        self._mine_send(tag, data)
self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) def _handle_tag_salt_auth_creds(self, tag, data): ''' Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] 
@tornado.gen.coroutine def handle_event(self, package): ''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. ''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion._handle_tag_master_connected
python
def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True)
Handle a master_connected event
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2583-L2611
null
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, 
self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' 
mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! 
if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) executors = salt.loader.executors(opts, functions, proxy=proxy) if opt_in: self.opts = opts return functions, returners, errors, executors def _send_req_sync(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.ReqChannel.factory(self.opts) try: return channel.send(load, timeout=timeout) finally: channel.close() @tornado.gen.coroutine def _send_req_async(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.AsyncReqChannel.factory(self.opts) try: ret = yield channel.send(load, timeout=timeout) raise tornado.gen.Return(ret) finally: channel.close() def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. ''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return if sync: try: self._send_req_sync(load, timeout) except salt.exceptions.SaltReqTimeoutError: log.info('fire_master failed: master could not be contacted. 
Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return False except Exception: log.info('fire_master failed: %s', traceback.format_exc()) return False else: if timeout_handler is None: def handle_timeout(*_): log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return True timeout_handler = handle_timeout with tornado.stack_context.ExceptionStackContext(timeout_handler): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # Ensure payload is unicode. Disregard failure to decode binary blobs. if six.PY2: data = salt.utils.data.decode(data, keep=True) if 'user' in data: log.info( 'User %s Executing command %s with jid %s', data['user'], data['fun'], data['jid'] ) else: log.info( 'Executing command %s with jid %s', data['fun'], data['jid'] ) log.debug('Command details %s', data) # Don't duplicate jobs log.trace('Started JIDs: %s', self.jid_queue) if self.jid_queue is not None: if data['jid'] in self.jid_queue: return else: self.jid_queue.append(data['jid']) if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']: self.jid_queue.pop(0) if isinstance(data['fun'], six.string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners process_count_max = self.opts.get('process_count_max') process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs') if process_count_max > 0: process_count = len(salt.utils.minion.running(self.opts)) while 
process_count >= process_count_max: log.warning('Maximum number of processes (%s) reached while ' 'executing jid %s, waiting %s seconds...', process_count_max, data['jid'], process_count_max_sleep_secs) yield tornado.gen.sleep(process_count_max_sleep_secs) process_count = len(salt.utils.minion.running(self.opts)) # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. instance = self multiprocessing_enabled = self.opts.get('multiprocessing', True) if multiprocessing_enabled: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None with default_signals(signal.SIGINT, signal.SIGTERM): process = SignalHandlingMultiprocessingProcess( target=self._target, args=(instance, self.opts, data, self.connected) ) else: process = threading.Thread( target=self._target, args=(instance, self.opts, data, self.connected), name=data['jid'] ) if multiprocessing_enabled: with default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers process.start() else: process.start() # TODO: remove the windows specific check? 
if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading 
target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. 
Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
try: func_result = all(return_data.get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret['retcode'] = retcode ret['success'] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = 'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( 'A command in \'%s\' had a problem: %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( 'Problem executing \'%s\': %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing \'{0}\': {1}'.format( function_name, exc ) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format( function_name, exc, func.__doc__ or '' ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string(function_name) ret['return'] = docs else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = 
function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: \'{0}\''.format( minion_instance.function_errors[mod_name] ) ret['success'] = False ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. Ignoring.') if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) # Add default returners from minion config # Should have been coverted to comma-delimited string already if isinstance(opts.get('return'), six.string_types): if data['ret']: data['ret'] = ','.join((data['ret'], opts['return'])) else: data['ret'] = opts['return'] log.debug('minion return: %s', ret) # TODO: make a list? Seems odd to split it this late :/ if data['ret'] and isinstance(data['ret'], six.string_types): if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: returner_str = '{0}.returner'.format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string(returner_str) log.error( 'Returner %s could not be loaded: %s', returner_str, returner_err ) except Exception as exc: log.exception( 'The return failed for job %s: %s', data['jid'], exc ) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. 
''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID %s', sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) multifunc_ordered = opts.get('multifunc_ordered', False) num_funcs = len(data['fun']) if multifunc_ordered: ret = { 'return': [None] * num_funcs, 'retcode': [None] * num_funcs, 'success': [False] * num_funcs } else: ret = { 'return': {}, 'retcode': {}, 'success': {} } for ind in range(0, num_funcs): if not multifunc_ordered: ret['success'][data['fun'][ind]] = False try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True elif minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. 
Only ' 'saltutil.refresh_pillar allowed in blackout mode.') func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) minion_instance.functions.pack['__context__']['retcode'] = 0 key = ind if multifunc_ordered else data['fun'][ind] ret['return'][key] = func(*args, **kwargs) retcode = minion_instance.functions.pack['__context__'].get( 'retcode', 0 ) if retcode == 0: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. try: func_result = all(ret['return'][key].get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = 1 ret['retcode'][key] = retcode ret['success'][key] = retcode == 0 except Exception as exc: trb = traceback.format_exc() log.warning('The minion function caused an exception: %s', exc) if multifunc_ordered: ret['return'][ind] = trb else: ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job %s: %s', data['jid'], exc ) def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file 
is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled if ret['jid'] == 'req': ret['jid'] = salt.utils.jid.gen_jid(self.opts) salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) if not self.opts['pub_ret']: return '' def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. 
If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' if not isinstance(rets, list): rets = [rets] jids = {} for ret in rets: jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) load = jids.setdefault(jid, {}) if ret_cmd == '_syndic_return': if not load: load.update({'id': self.opts['id'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__'), 'return': {}}) if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load.update({'id': self.opts['id']}) for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. 
This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled salt.utils.minion.cache_jobs(self.opts, load['jid'], ret) load = {'cmd': ret_cmd, 'load': list(six.itervalues(jids))} def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' 
) else: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data) def _refresh_grains_watcher(self, refresh_interval_in_minutes): ''' Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: :return: None ''' if '__update_grains' not in self.opts.get('schedule', {}): if 'schedule' not in self.opts: self.opts['schedule'] = {} self.opts['schedule'].update({ '__update_grains': { 'function': 'event.fire', 'args': [{}, 'grains_refresh'], 'minutes': refresh_interval_in_minutes } }) def _fire_master_minion_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to False in Sodium release. self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'minion_start' ) # send name spaced event self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'minion'), ) def module_refresh(self, force_refresh=False, notify=False): ''' Refresh the functions and returners. ''' log.debug('Refreshing modules. Notify=%s', notify) self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify) self.schedule.functions = self.functions self.schedule.returners = self.returners def beacons_refresh(self): ''' Refresh the functions and returners. 
''' log.debug('Refreshing beacons.') self.beacons = salt.beacons.Beacon(self.opts, self.functions) def matchers_refresh(self): ''' Refresh the matchers ''' log.debug('Refreshing matchers.') self.matchers = salt.loader.matchers(self.opts) # TODO: only allow one future in flight at a time? @tornado.gen.coroutine def pillar_refresh(self, force_refresh=False, notify=False): ''' Refresh the pillar ''' if self.connected: log.debug('Refreshing pillar. Notify: %s', notify) async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ) try: self.opts['pillar'] = yield async_pillar.compile_pillar() if notify: evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE) except SaltClientError: # Do not exit if a pillar refresh fails. log.error('Pillar data could not be refreshed. ' 'One or more masters may be down!') finally: async_pillar.destroy() self.module_refresh(force_refresh, notify) self.matchers_refresh() self.beacons_refresh() def manage_schedule(self, tag, data): ''' Refresh the functions and returners. 
''' func = data.get('func', None) name = data.get('name', None) schedule = data.get('schedule', None) where = data.get('where', None) persist = data.get('persist', None) funcs = {'delete': ('delete_job', (name, persist)), 'add': ('add_job', (schedule, persist)), 'modify': ('modify_job', (name, schedule, persist)), 'enable': ('enable_schedule', ()), 'disable': ('disable_schedule', ()), 'enable_job': ('enable_job', (name, persist)), 'disable_job': ('disable_job', (name, persist)), 'postpone_job': ('postpone_job', (name, data)), 'skip_job': ('skip_job', (name, data)), 'reload': ('reload', (schedule,)), 'list': ('list', (where,)), 'save_schedule': ('save_schedule', ()), 'get_next_fire_time': ('get_next_fire_time', (name,))} # Call the appropriate schedule function try: alias, params = funcs.get(func) getattr(self.schedule, alias)(*params) except TypeError: log.error('Function "%s" is unavailable in salt.utils.scheduler', func) def manage_beacons(self, tag, data): ''' Manage Beacons ''' func = data.get('func', None) name = data.get('name', None) beacon_data = data.get('beacon_data', None) include_pillar = data.get('include_pillar', None) include_opts = data.get('include_opts', None) funcs = {'add': ('add_beacon', (name, beacon_data)), 'modify': ('modify_beacon', (name, beacon_data)), 'delete': ('delete_beacon', (name,)), 'enable': ('enable_beacons', ()), 'disable': ('disable_beacons', ()), 'enable_beacon': ('enable_beacon', (name,)), 'disable_beacon': ('disable_beacon', (name,)), 'list': ('list_beacons', (include_opts, include_pillar)), 'list_available': ('list_available_beacons', ()), 'validate_beacon': ('validate_beacon', (name, beacon_data)), 'reset': ('reset', ())} # Call the appropriate beacon function try: alias, params = funcs.get(func) getattr(self.beacons, alias)(*params) except AttributeError: log.error('Function "%s" is unavailable in salt.beacons', func) except TypeError as exc: log.info( 'Failed to handle %s with data(%s). 
Error: %s', tag, data, exc, exc_info_on_loglevel=logging.DEBUG ) def environ_setenv(self, tag, data): ''' Set the salt-minion main process environment according to the data contained in the minion event data ''' environ = data.get('environ', None) if environ is None: return False false_unsets = data.get('false_unsets', False) clear_all = data.get('clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all) def _pre_tune(self): ''' Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running ''' if self._running is None: self._running = True elif self._running is False: log.error( 'This %s was scheduled to stop. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return elif self._running is True: log.error( 'This %s is already running. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return try: log.info( '%s is starting as user \'%s\'', self.__class__.__name__, salt.utils.user.get_user() ) except Exception as err: # Only windows is allowed to fail here. See #3189. Log as debug in # that case. Else, error. 
log.log( salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR, 'Failed to get the user who is starting %s', self.__class__.__name__, exc_info=err ) def _mine_send(self, tag, data): ''' Send mine data to the master ''' channel = salt.transport.client.ReqChannel.factory(self.opts) data['tok'] = self.tok try: ret = channel.send(data) return ret except SaltReqTimeoutError: log.warning('Unable to send mine data to master.') return None finally: channel.close() def _handle_tag_module_refresh(self, tag, data): ''' Handle a module_refresh event ''' self.module_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) @tornado.gen.coroutine def _handle_tag_pillar_refresh(self, tag, data): ''' Handle a pillar_refresh event ''' yield self.pillar_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) def _handle_tag_beacons_refresh(self, tag, data): ''' Handle a beacon_refresh event ''' self.beacons_refresh() def _handle_tag_matchers_refresh(self, tag, data): ''' Handle a matchers_refresh event ''' self.matchers_refresh() def _handle_tag_manage_schedule(self, tag, data): ''' Handle a manage_schedule event ''' self.manage_schedule(tag, data) def _handle_tag_manage_beacons(self, tag, data): ''' Handle a manage_beacons event ''' self.manage_beacons(tag, data) def _handle_tag_grains_refresh(self, tag, data): ''' Handle a grains_refresh event ''' if (data.get('force_refresh', False) or self.grains_cache != self.opts['grains']): self.pillar_refresh(force_refresh=True) self.grains_cache = self.opts['grains'] def _handle_tag_environ_setenv(self, tag, data): ''' Handle a environ_setenv event ''' self.environ_setenv(tag, data) def _handle_tag_minion_mine(self, tag, data): ''' Handle a _minion_mine event ''' self._mine_send(tag, data) def _handle_tag_fire_master(self, tag, data): ''' Handle a fire_master event ''' if self.connected: log.debug('Forwarding master event tag=%s', data['tag']) 
self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) def _handle_tag_master_disconnected_failback(self, tag, data): ''' Handle a master_disconnected_failback event ''' # if the master disconnect event is for a different master, raise an exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 
'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = 
True self.io_loop.stop() def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) def _handle_tag_salt_auth_creds(self, tag, data): ''' Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def handle_event(self, package): ''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): 
self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. ''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. 
Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion._handle_tag_schedule_return
python
def _handle_tag_schedule_return(self, tag, data):
    '''
    Handle a _schedule_return event
    '''
    # Schedule jobs whose name carries the master-alive event prefix report
    # the minion's connection status; when the job returned truthy, log
    # which master this minion is currently connected to.
    alive_prefix = master_event(type='alive', master='')
    if data['schedule'].startswith(alive_prefix) and data['return']:
        log.debug(
            'Connected to master %s',
            data['schedule'].split(alive_prefix)[1]
        )
    # Always forward the scheduled-job result to the master.
    self._return_pub(data, ret_cmd='_return', sync=False)
Handle a _schedule_return event
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2613-L2624
null
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, 
self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' 
mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! 
if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) executors = salt.loader.executors(opts, functions, proxy=proxy) if opt_in: self.opts = opts return functions, returners, errors, executors def _send_req_sync(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.ReqChannel.factory(self.opts) try: return channel.send(load, timeout=timeout) finally: channel.close() @tornado.gen.coroutine def _send_req_async(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.AsyncReqChannel.factory(self.opts) try: ret = yield channel.send(load, timeout=timeout) raise tornado.gen.Return(ret) finally: channel.close() def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. ''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return if sync: try: self._send_req_sync(load, timeout) except salt.exceptions.SaltReqTimeoutError: log.info('fire_master failed: master could not be contacted. 
Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return False except Exception: log.info('fire_master failed: %s', traceback.format_exc()) return False else: if timeout_handler is None: def handle_timeout(*_): log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return True timeout_handler = handle_timeout with tornado.stack_context.ExceptionStackContext(timeout_handler): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # Ensure payload is unicode. Disregard failure to decode binary blobs. if six.PY2: data = salt.utils.data.decode(data, keep=True) if 'user' in data: log.info( 'User %s Executing command %s with jid %s', data['user'], data['fun'], data['jid'] ) else: log.info( 'Executing command %s with jid %s', data['fun'], data['jid'] ) log.debug('Command details %s', data) # Don't duplicate jobs log.trace('Started JIDs: %s', self.jid_queue) if self.jid_queue is not None: if data['jid'] in self.jid_queue: return else: self.jid_queue.append(data['jid']) if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']: self.jid_queue.pop(0) if isinstance(data['fun'], six.string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners process_count_max = self.opts.get('process_count_max') process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs') if process_count_max > 0: process_count = len(salt.utils.minion.running(self.opts)) while 
process_count >= process_count_max: log.warning('Maximum number of processes (%s) reached while ' 'executing jid %s, waiting %s seconds...', process_count_max, data['jid'], process_count_max_sleep_secs) yield tornado.gen.sleep(process_count_max_sleep_secs) process_count = len(salt.utils.minion.running(self.opts)) # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. instance = self multiprocessing_enabled = self.opts.get('multiprocessing', True) if multiprocessing_enabled: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None with default_signals(signal.SIGINT, signal.SIGTERM): process = SignalHandlingMultiprocessingProcess( target=self._target, args=(instance, self.opts, data, self.connected) ) else: process = threading.Thread( target=self._target, args=(instance, self.opts, data, self.connected), name=data['jid'] ) if multiprocessing_enabled: with default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers process.start() else: process.start() # TODO: remove the windows specific check? 
if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading 
target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. 
Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
try: func_result = all(return_data.get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret['retcode'] = retcode ret['success'] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = 'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( 'A command in \'%s\' had a problem: %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( 'Problem executing \'%s\': %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing \'{0}\': {1}'.format( function_name, exc ) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format( function_name, exc, func.__doc__ or '' ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string(function_name) ret['return'] = docs else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = 
function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: \'{0}\''.format( minion_instance.function_errors[mod_name] ) ret['success'] = False ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. Ignoring.') if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) # Add default returners from minion config # Should have been coverted to comma-delimited string already if isinstance(opts.get('return'), six.string_types): if data['ret']: data['ret'] = ','.join((data['ret'], opts['return'])) else: data['ret'] = opts['return'] log.debug('minion return: %s', ret) # TODO: make a list? Seems odd to split it this late :/ if data['ret'] and isinstance(data['ret'], six.string_types): if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: returner_str = '{0}.returner'.format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string(returner_str) log.error( 'Returner %s could not be loaded: %s', returner_str, returner_err ) except Exception as exc: log.exception( 'The return failed for job %s: %s', data['jid'], exc ) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. 
''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID %s', sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) multifunc_ordered = opts.get('multifunc_ordered', False) num_funcs = len(data['fun']) if multifunc_ordered: ret = { 'return': [None] * num_funcs, 'retcode': [None] * num_funcs, 'success': [False] * num_funcs } else: ret = { 'return': {}, 'retcode': {}, 'success': {} } for ind in range(0, num_funcs): if not multifunc_ordered: ret['success'][data['fun'][ind]] = False try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True elif minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. 
Only ' 'saltutil.refresh_pillar allowed in blackout mode.') func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) minion_instance.functions.pack['__context__']['retcode'] = 0 key = ind if multifunc_ordered else data['fun'][ind] ret['return'][key] = func(*args, **kwargs) retcode = minion_instance.functions.pack['__context__'].get( 'retcode', 0 ) if retcode == 0: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. try: func_result = all(ret['return'][key].get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = 1 ret['retcode'][key] = retcode ret['success'][key] = retcode == 0 except Exception as exc: trb = traceback.format_exc() log.warning('The minion function caused an exception: %s', exc) if multifunc_ordered: ret['return'][ind] = trb else: ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job %s: %s', data['jid'], exc ) def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file 
is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled if ret['jid'] == 'req': ret['jid'] = salt.utils.jid.gen_jid(self.opts) salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) if not self.opts['pub_ret']: return '' def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. 
If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' if not isinstance(rets, list): rets = [rets] jids = {} for ret in rets: jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) load = jids.setdefault(jid, {}) if ret_cmd == '_syndic_return': if not load: load.update({'id': self.opts['id'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__'), 'return': {}}) if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load.update({'id': self.opts['id']}) for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. 
This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled salt.utils.minion.cache_jobs(self.opts, load['jid'], ret) load = {'cmd': ret_cmd, 'load': list(six.itervalues(jids))} def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' 
) else: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data) def _refresh_grains_watcher(self, refresh_interval_in_minutes): ''' Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: :return: None ''' if '__update_grains' not in self.opts.get('schedule', {}): if 'schedule' not in self.opts: self.opts['schedule'] = {} self.opts['schedule'].update({ '__update_grains': { 'function': 'event.fire', 'args': [{}, 'grains_refresh'], 'minutes': refresh_interval_in_minutes } }) def _fire_master_minion_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to False in Sodium release. self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'minion_start' ) # send name spaced event self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'minion'), ) def module_refresh(self, force_refresh=False, notify=False): ''' Refresh the functions and returners. ''' log.debug('Refreshing modules. Notify=%s', notify) self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify) self.schedule.functions = self.functions self.schedule.returners = self.returners def beacons_refresh(self): ''' Refresh the functions and returners. 
''' log.debug('Refreshing beacons.') self.beacons = salt.beacons.Beacon(self.opts, self.functions) def matchers_refresh(self): ''' Refresh the matchers ''' log.debug('Refreshing matchers.') self.matchers = salt.loader.matchers(self.opts) # TODO: only allow one future in flight at a time? @tornado.gen.coroutine def pillar_refresh(self, force_refresh=False, notify=False): ''' Refresh the pillar ''' if self.connected: log.debug('Refreshing pillar. Notify: %s', notify) async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ) try: self.opts['pillar'] = yield async_pillar.compile_pillar() if notify: evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE) except SaltClientError: # Do not exit if a pillar refresh fails. log.error('Pillar data could not be refreshed. ' 'One or more masters may be down!') finally: async_pillar.destroy() self.module_refresh(force_refresh, notify) self.matchers_refresh() self.beacons_refresh() def manage_schedule(self, tag, data): ''' Refresh the functions and returners. 
''' func = data.get('func', None) name = data.get('name', None) schedule = data.get('schedule', None) where = data.get('where', None) persist = data.get('persist', None) funcs = {'delete': ('delete_job', (name, persist)), 'add': ('add_job', (schedule, persist)), 'modify': ('modify_job', (name, schedule, persist)), 'enable': ('enable_schedule', ()), 'disable': ('disable_schedule', ()), 'enable_job': ('enable_job', (name, persist)), 'disable_job': ('disable_job', (name, persist)), 'postpone_job': ('postpone_job', (name, data)), 'skip_job': ('skip_job', (name, data)), 'reload': ('reload', (schedule,)), 'list': ('list', (where,)), 'save_schedule': ('save_schedule', ()), 'get_next_fire_time': ('get_next_fire_time', (name,))} # Call the appropriate schedule function try: alias, params = funcs.get(func) getattr(self.schedule, alias)(*params) except TypeError: log.error('Function "%s" is unavailable in salt.utils.scheduler', func) def manage_beacons(self, tag, data): ''' Manage Beacons ''' func = data.get('func', None) name = data.get('name', None) beacon_data = data.get('beacon_data', None) include_pillar = data.get('include_pillar', None) include_opts = data.get('include_opts', None) funcs = {'add': ('add_beacon', (name, beacon_data)), 'modify': ('modify_beacon', (name, beacon_data)), 'delete': ('delete_beacon', (name,)), 'enable': ('enable_beacons', ()), 'disable': ('disable_beacons', ()), 'enable_beacon': ('enable_beacon', (name,)), 'disable_beacon': ('disable_beacon', (name,)), 'list': ('list_beacons', (include_opts, include_pillar)), 'list_available': ('list_available_beacons', ()), 'validate_beacon': ('validate_beacon', (name, beacon_data)), 'reset': ('reset', ())} # Call the appropriate beacon function try: alias, params = funcs.get(func) getattr(self.beacons, alias)(*params) except AttributeError: log.error('Function "%s" is unavailable in salt.beacons', func) except TypeError as exc: log.info( 'Failed to handle %s with data(%s). 
Error: %s', tag, data, exc, exc_info_on_loglevel=logging.DEBUG ) def environ_setenv(self, tag, data): ''' Set the salt-minion main process environment according to the data contained in the minion event data ''' environ = data.get('environ', None) if environ is None: return False false_unsets = data.get('false_unsets', False) clear_all = data.get('clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all) def _pre_tune(self): ''' Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running ''' if self._running is None: self._running = True elif self._running is False: log.error( 'This %s was scheduled to stop. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return elif self._running is True: log.error( 'This %s is already running. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return try: log.info( '%s is starting as user \'%s\'', self.__class__.__name__, salt.utils.user.get_user() ) except Exception as err: # Only windows is allowed to fail here. See #3189. Log as debug in # that case. Else, error. 
log.log( salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR, 'Failed to get the user who is starting %s', self.__class__.__name__, exc_info=err ) def _mine_send(self, tag, data): ''' Send mine data to the master ''' channel = salt.transport.client.ReqChannel.factory(self.opts) data['tok'] = self.tok try: ret = channel.send(data) return ret except SaltReqTimeoutError: log.warning('Unable to send mine data to master.') return None finally: channel.close() def _handle_tag_module_refresh(self, tag, data): ''' Handle a module_refresh event ''' self.module_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) @tornado.gen.coroutine def _handle_tag_pillar_refresh(self, tag, data): ''' Handle a pillar_refresh event ''' yield self.pillar_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) def _handle_tag_beacons_refresh(self, tag, data): ''' Handle a beacon_refresh event ''' self.beacons_refresh() def _handle_tag_matchers_refresh(self, tag, data): ''' Handle a matchers_refresh event ''' self.matchers_refresh() def _handle_tag_manage_schedule(self, tag, data): ''' Handle a manage_schedule event ''' self.manage_schedule(tag, data) def _handle_tag_manage_beacons(self, tag, data): ''' Handle a manage_beacons event ''' self.manage_beacons(tag, data) def _handle_tag_grains_refresh(self, tag, data): ''' Handle a grains_refresh event ''' if (data.get('force_refresh', False) or self.grains_cache != self.opts['grains']): self.pillar_refresh(force_refresh=True) self.grains_cache = self.opts['grains'] def _handle_tag_environ_setenv(self, tag, data): ''' Handle a environ_setenv event ''' self.environ_setenv(tag, data) def _handle_tag_minion_mine(self, tag, data): ''' Handle a _minion_mine event ''' self._mine_send(tag, data) def _handle_tag_fire_master(self, tag, data): ''' Handle a fire_master event ''' if self.connected: log.debug('Forwarding master event tag=%s', data['tag']) 
self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) def _handle_tag_master_disconnected_failback(self, tag, data): ''' Handle a master_disconnected_failback event ''' # if the master disconnect event is for a different master, raise an exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 
'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = 
True self.io_loop.stop() def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) def _handle_tag_salt_auth_creds(self, tag, data): ''' Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def handle_event(self, package): ''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 
'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. ''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion._handle_tag_salt_error
python
def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag)
Handle a _salt_error event
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2626-L2632
null
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, 
self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' 
mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! 
if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) executors = salt.loader.executors(opts, functions, proxy=proxy) if opt_in: self.opts = opts return functions, returners, errors, executors def _send_req_sync(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.ReqChannel.factory(self.opts) try: return channel.send(load, timeout=timeout) finally: channel.close() @tornado.gen.coroutine def _send_req_async(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.AsyncReqChannel.factory(self.opts) try: ret = yield channel.send(load, timeout=timeout) raise tornado.gen.Return(ret) finally: channel.close() def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. ''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return if sync: try: self._send_req_sync(load, timeout) except salt.exceptions.SaltReqTimeoutError: log.info('fire_master failed: master could not be contacted. 
Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return False except Exception: log.info('fire_master failed: %s', traceback.format_exc()) return False else: if timeout_handler is None: def handle_timeout(*_): log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return True timeout_handler = handle_timeout with tornado.stack_context.ExceptionStackContext(timeout_handler): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # Ensure payload is unicode. Disregard failure to decode binary blobs. if six.PY2: data = salt.utils.data.decode(data, keep=True) if 'user' in data: log.info( 'User %s Executing command %s with jid %s', data['user'], data['fun'], data['jid'] ) else: log.info( 'Executing command %s with jid %s', data['fun'], data['jid'] ) log.debug('Command details %s', data) # Don't duplicate jobs log.trace('Started JIDs: %s', self.jid_queue) if self.jid_queue is not None: if data['jid'] in self.jid_queue: return else: self.jid_queue.append(data['jid']) if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']: self.jid_queue.pop(0) if isinstance(data['fun'], six.string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners process_count_max = self.opts.get('process_count_max') process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs') if process_count_max > 0: process_count = len(salt.utils.minion.running(self.opts)) while 
process_count >= process_count_max: log.warning('Maximum number of processes (%s) reached while ' 'executing jid %s, waiting %s seconds...', process_count_max, data['jid'], process_count_max_sleep_secs) yield tornado.gen.sleep(process_count_max_sleep_secs) process_count = len(salt.utils.minion.running(self.opts)) # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. instance = self multiprocessing_enabled = self.opts.get('multiprocessing', True) if multiprocessing_enabled: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None with default_signals(signal.SIGINT, signal.SIGTERM): process = SignalHandlingMultiprocessingProcess( target=self._target, args=(instance, self.opts, data, self.connected) ) else: process = threading.Thread( target=self._target, args=(instance, self.opts, data, self.connected), name=data['jid'] ) if multiprocessing_enabled: with default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers process.start() else: process.start() # TODO: remove the windows specific check? 
if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading 
target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. 
Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
try: func_result = all(return_data.get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret['retcode'] = retcode ret['success'] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = 'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( 'A command in \'%s\' had a problem: %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( 'Problem executing \'%s\': %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing \'{0}\': {1}'.format( function_name, exc ) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format( function_name, exc, func.__doc__ or '' ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string(function_name) ret['return'] = docs else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = 
function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: \'{0}\''.format( minion_instance.function_errors[mod_name] ) ret['success'] = False ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. Ignoring.') if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) # Add default returners from minion config # Should have been coverted to comma-delimited string already if isinstance(opts.get('return'), six.string_types): if data['ret']: data['ret'] = ','.join((data['ret'], opts['return'])) else: data['ret'] = opts['return'] log.debug('minion return: %s', ret) # TODO: make a list? Seems odd to split it this late :/ if data['ret'] and isinstance(data['ret'], six.string_types): if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: returner_str = '{0}.returner'.format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string(returner_str) log.error( 'Returner %s could not be loaded: %s', returner_str, returner_err ) except Exception as exc: log.exception( 'The return failed for job %s: %s', data['jid'], exc ) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. 
''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID %s', sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) multifunc_ordered = opts.get('multifunc_ordered', False) num_funcs = len(data['fun']) if multifunc_ordered: ret = { 'return': [None] * num_funcs, 'retcode': [None] * num_funcs, 'success': [False] * num_funcs } else: ret = { 'return': {}, 'retcode': {}, 'success': {} } for ind in range(0, num_funcs): if not multifunc_ordered: ret['success'][data['fun'][ind]] = False try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True elif minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. 
Only ' 'saltutil.refresh_pillar allowed in blackout mode.') func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) minion_instance.functions.pack['__context__']['retcode'] = 0 key = ind if multifunc_ordered else data['fun'][ind] ret['return'][key] = func(*args, **kwargs) retcode = minion_instance.functions.pack['__context__'].get( 'retcode', 0 ) if retcode == 0: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. try: func_result = all(ret['return'][key].get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = 1 ret['retcode'][key] = retcode ret['success'][key] = retcode == 0 except Exception as exc: trb = traceback.format_exc() log.warning('The minion function caused an exception: %s', exc) if multifunc_ordered: ret['return'][ind] = trb else: ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job %s: %s', data['jid'], exc ) def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file 
is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled if ret['jid'] == 'req': ret['jid'] = salt.utils.jid.gen_jid(self.opts) salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) if not self.opts['pub_ret']: return '' def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. 
If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' if not isinstance(rets, list): rets = [rets] jids = {} for ret in rets: jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) load = jids.setdefault(jid, {}) if ret_cmd == '_syndic_return': if not load: load.update({'id': self.opts['id'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__'), 'return': {}}) if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load.update({'id': self.opts['id']}) for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. 
This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled salt.utils.minion.cache_jobs(self.opts, load['jid'], ret) load = {'cmd': ret_cmd, 'load': list(six.itervalues(jids))} def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' 
) else: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data) def _refresh_grains_watcher(self, refresh_interval_in_minutes): ''' Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: :return: None ''' if '__update_grains' not in self.opts.get('schedule', {}): if 'schedule' not in self.opts: self.opts['schedule'] = {} self.opts['schedule'].update({ '__update_grains': { 'function': 'event.fire', 'args': [{}, 'grains_refresh'], 'minutes': refresh_interval_in_minutes } }) def _fire_master_minion_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to False in Sodium release. self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'minion_start' ) # send name spaced event self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'minion'), ) def module_refresh(self, force_refresh=False, notify=False): ''' Refresh the functions and returners. ''' log.debug('Refreshing modules. Notify=%s', notify) self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify) self.schedule.functions = self.functions self.schedule.returners = self.returners def beacons_refresh(self): ''' Refresh the functions and returners. 
''' log.debug('Refreshing beacons.') self.beacons = salt.beacons.Beacon(self.opts, self.functions) def matchers_refresh(self): ''' Refresh the matchers ''' log.debug('Refreshing matchers.') self.matchers = salt.loader.matchers(self.opts) # TODO: only allow one future in flight at a time? @tornado.gen.coroutine def pillar_refresh(self, force_refresh=False, notify=False): ''' Refresh the pillar ''' if self.connected: log.debug('Refreshing pillar. Notify: %s', notify) async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ) try: self.opts['pillar'] = yield async_pillar.compile_pillar() if notify: evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE) except SaltClientError: # Do not exit if a pillar refresh fails. log.error('Pillar data could not be refreshed. ' 'One or more masters may be down!') finally: async_pillar.destroy() self.module_refresh(force_refresh, notify) self.matchers_refresh() self.beacons_refresh() def manage_schedule(self, tag, data): ''' Refresh the functions and returners. 
''' func = data.get('func', None) name = data.get('name', None) schedule = data.get('schedule', None) where = data.get('where', None) persist = data.get('persist', None) funcs = {'delete': ('delete_job', (name, persist)), 'add': ('add_job', (schedule, persist)), 'modify': ('modify_job', (name, schedule, persist)), 'enable': ('enable_schedule', ()), 'disable': ('disable_schedule', ()), 'enable_job': ('enable_job', (name, persist)), 'disable_job': ('disable_job', (name, persist)), 'postpone_job': ('postpone_job', (name, data)), 'skip_job': ('skip_job', (name, data)), 'reload': ('reload', (schedule,)), 'list': ('list', (where,)), 'save_schedule': ('save_schedule', ()), 'get_next_fire_time': ('get_next_fire_time', (name,))} # Call the appropriate schedule function try: alias, params = funcs.get(func) getattr(self.schedule, alias)(*params) except TypeError: log.error('Function "%s" is unavailable in salt.utils.scheduler', func) def manage_beacons(self, tag, data): ''' Manage Beacons ''' func = data.get('func', None) name = data.get('name', None) beacon_data = data.get('beacon_data', None) include_pillar = data.get('include_pillar', None) include_opts = data.get('include_opts', None) funcs = {'add': ('add_beacon', (name, beacon_data)), 'modify': ('modify_beacon', (name, beacon_data)), 'delete': ('delete_beacon', (name,)), 'enable': ('enable_beacons', ()), 'disable': ('disable_beacons', ()), 'enable_beacon': ('enable_beacon', (name,)), 'disable_beacon': ('disable_beacon', (name,)), 'list': ('list_beacons', (include_opts, include_pillar)), 'list_available': ('list_available_beacons', ()), 'validate_beacon': ('validate_beacon', (name, beacon_data)), 'reset': ('reset', ())} # Call the appropriate beacon function try: alias, params = funcs.get(func) getattr(self.beacons, alias)(*params) except AttributeError: log.error('Function "%s" is unavailable in salt.beacons', func) except TypeError as exc: log.info( 'Failed to handle %s with data(%s). 
Error: %s', tag, data, exc, exc_info_on_loglevel=logging.DEBUG ) def environ_setenv(self, tag, data): ''' Set the salt-minion main process environment according to the data contained in the minion event data ''' environ = data.get('environ', None) if environ is None: return False false_unsets = data.get('false_unsets', False) clear_all = data.get('clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all) def _pre_tune(self): ''' Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running ''' if self._running is None: self._running = True elif self._running is False: log.error( 'This %s was scheduled to stop. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return elif self._running is True: log.error( 'This %s is already running. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return try: log.info( '%s is starting as user \'%s\'', self.__class__.__name__, salt.utils.user.get_user() ) except Exception as err: # Only windows is allowed to fail here. See #3189. Log as debug in # that case. Else, error. 
log.log( salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR, 'Failed to get the user who is starting %s', self.__class__.__name__, exc_info=err ) def _mine_send(self, tag, data): ''' Send mine data to the master ''' channel = salt.transport.client.ReqChannel.factory(self.opts) data['tok'] = self.tok try: ret = channel.send(data) return ret except SaltReqTimeoutError: log.warning('Unable to send mine data to master.') return None finally: channel.close() def _handle_tag_module_refresh(self, tag, data): ''' Handle a module_refresh event ''' self.module_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) @tornado.gen.coroutine def _handle_tag_pillar_refresh(self, tag, data): ''' Handle a pillar_refresh event ''' yield self.pillar_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) def _handle_tag_beacons_refresh(self, tag, data): ''' Handle a beacon_refresh event ''' self.beacons_refresh() def _handle_tag_matchers_refresh(self, tag, data): ''' Handle a matchers_refresh event ''' self.matchers_refresh() def _handle_tag_manage_schedule(self, tag, data): ''' Handle a manage_schedule event ''' self.manage_schedule(tag, data) def _handle_tag_manage_beacons(self, tag, data): ''' Handle a manage_beacons event ''' self.manage_beacons(tag, data) def _handle_tag_grains_refresh(self, tag, data): ''' Handle a grains_refresh event ''' if (data.get('force_refresh', False) or self.grains_cache != self.opts['grains']): self.pillar_refresh(force_refresh=True) self.grains_cache = self.opts['grains'] def _handle_tag_environ_setenv(self, tag, data): ''' Handle a environ_setenv event ''' self.environ_setenv(tag, data) def _handle_tag_minion_mine(self, tag, data): ''' Handle a _minion_mine event ''' self._mine_send(tag, data) def _handle_tag_fire_master(self, tag, data): ''' Handle a fire_master event ''' if self.connected: log.debug('Forwarding master event tag=%s', data['tag']) 
self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) def _handle_tag_master_disconnected_failback(self, tag, data): ''' Handle a master_disconnected_failback event ''' # if the master disconnect event is for a different master, raise an exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 
'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = 
True self.io_loop.stop() def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_auth_creds(self, tag, data): ''' Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def handle_event(self, package): ''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = salt.utils.event.SaltEvent.unpack(package) log.debug( 
'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. ''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion._handle_tag_salt_auth_creds
python
def _handle_tag_salt_auth_creds(self, tag, data): ''' Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds']
Handle a salt_auth_creds event
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2634-L2643
null
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, 
self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' 
mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! 
        if modules_max_memory is True:
            # restore the address-space limit captured before module loading
            resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)

        executors = salt.loader.executors(opts, functions, proxy=proxy)

        if opt_in:
            # an explicit opts dict was passed in; adopt it as the live opts
            self.opts = opts

        return functions, returners, errors, executors

    def _send_req_sync(self, load, timeout):
        '''
        Synchronously send ``load`` to the master over a request channel.

        Signs the payload first when ``minion_sign_messages`` is enabled.
        The channel is always closed, even when ``send`` raises.
        '''
        if self.opts['minion_sign_messages']:
            log.trace('Signing event to be published onto the bus.')
            minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
            sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
            load['sig'] = sig

        channel = salt.transport.client.ReqChannel.factory(self.opts)
        try:
            return channel.send(load, timeout=timeout)
        finally:
            channel.close()

    @tornado.gen.coroutine
    def _send_req_async(self, load, timeout):
        '''
        Asynchronous counterpart of :meth:`_send_req_sync`; resolves to the
        master's reply via ``tornado.gen.Return``.
        '''
        if self.opts['minion_sign_messages']:
            log.trace('Signing event to be published onto the bus.')
            minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
            sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
            load['sig'] = sig

        channel = salt.transport.client.AsyncReqChannel.factory(self.opts)
        try:
            ret = yield channel.send(load, timeout=timeout)
            raise tornado.gen.Return(ret)
        finally:
            channel.close()

    def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True,
                     timeout_handler=None):
        '''
        Fire an event on the master, or drop message if unable to send.

        Exactly one of ``events`` or (``data``/``tag``) is used to build the
        payload; when neither is provided the call is a no-op.
        Returns True on (apparent) success, False when the sync send fails.
        '''
        load = {'id': self.opts['id'],
                'cmd': '_minion_event',
                'pretag': pretag,
                'tok': self.tok}
        if events:
            load['events'] = events
        elif data and tag:
            load['data'] = data
            load['tag'] = tag
        elif not data and tag:
            # tag-only event: send an empty data dict
            load['data'] = {}
            load['tag'] = tag
        else:
            # nothing to send
            return

        if sync:
            try:
                self._send_req_sync(load, timeout)
            except salt.exceptions.SaltReqTimeoutError:
                log.info('fire_master failed: master could not be contacted. Request timed out.')
                # very likely one of the masters is dead, status.master will flush it
                self.functions['status.master'](self.opts['master'])
                return False
            except Exception:
                log.info('fire_master failed: %s', traceback.format_exc())
                return False
        else:
            if timeout_handler is None:
                def handle_timeout(*_):
                    log.info('fire_master failed: master could not be contacted. Request timed out.')
                    # very likely one of the masters is dead, status.master will flush it
                    self.functions['status.master'](self.opts['master'])
                    return True
                timeout_handler = handle_timeout

            with tornado.stack_context.ExceptionStackContext(timeout_handler):
                # pylint: disable=unexpected-keyword-arg
                self._send_req_async(load, timeout, callback=lambda f: None)
        return True

    @tornado.gen.coroutine
    def _handle_decoded_payload(self, data):
        '''
        Override this method if you wish to handle the decoded data
        differently.
        '''
        # Ensure payload is unicode. Disregard failure to decode binary blobs.
        if six.PY2:
            data = salt.utils.data.decode(data, keep=True)
        if 'user' in data:
            log.info(
                'User %s Executing command %s with jid %s',
                data['user'], data['fun'], data['jid']
            )
        else:
            log.info(
                'Executing command %s with jid %s',
                data['fun'], data['jid']
            )
        log.debug('Command details %s', data)

        # Don't duplicate jobs
        log.trace('Started JIDs: %s', self.jid_queue)
        if self.jid_queue is not None:
            if data['jid'] in self.jid_queue:
                # this jid was already seen; drop the duplicate publish
                return
            else:
                self.jid_queue.append(data['jid'])
                # keep the de-dup queue bounded at the configured high-water mark
                if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']:
                    self.jid_queue.pop(0)

        if isinstance(data['fun'], six.string_types):
            if data['fun'] == 'sys.reload_modules':
                # reload in-process so the scheduler also picks up the fresh loaders
                self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
                self.schedule.functions = self.functions
                self.schedule.returners = self.returners

        # throttle job spawning when process_count_max is configured
        process_count_max = self.opts.get('process_count_max')
        process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs')
        if process_count_max > 0:
            process_count = len(salt.utils.minion.running(self.opts))
            while process_count >= process_count_max:
                log.warning('Maximum number of processes (%s) reached while '
                            'executing jid %s, waiting %s seconds...',
                            process_count_max, data['jid'], process_count_max_sleep_secs)
                yield tornado.gen.sleep(process_count_max_sleep_secs)
                process_count = len(salt.utils.minion.running(self.opts))

        # We stash an instance references to allow for the socket
        # communication in Windows. You can't pickle functions, and thus
        # python needs to be able to reconstruct the reference on the other
        # side.
        instance = self
        multiprocessing_enabled = self.opts.get('multiprocessing', True)
        if multiprocessing_enabled:
            if sys.platform.startswith('win'):
                # let python reconstruct the minion on the other side if we're
                # running on windows
                instance = None
            with default_signals(signal.SIGINT, signal.SIGTERM):
                process = SignalHandlingMultiprocessingProcess(
                    target=self._target, args=(instance, self.opts, data, self.connected)
                )
        else:
            process = threading.Thread(
                target=self._target,
                args=(instance, self.opts, data, self.connected),
                name=data['jid']
            )

        if multiprocessing_enabled:
            with default_signals(signal.SIGINT, signal.SIGTERM):
                # Reset current signals before starting the process in
                # order not to inherit the current signal handlers
                process.start()
        else:
            process.start()

        # TODO: remove the windows specific check?
if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading 
target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. 
Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
try: func_result = all(return_data.get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret['retcode'] = retcode ret['success'] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = 'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( 'A command in \'%s\' had a problem: %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( 'Problem executing \'%s\': %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing \'{0}\': {1}'.format( function_name, exc ) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format( function_name, exc, func.__doc__ or '' ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string(function_name) ret['return'] = docs else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = 
function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: \'{0}\''.format( minion_instance.function_errors[mod_name] ) ret['success'] = False ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. Ignoring.') if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) # Add default returners from minion config # Should have been coverted to comma-delimited string already if isinstance(opts.get('return'), six.string_types): if data['ret']: data['ret'] = ','.join((data['ret'], opts['return'])) else: data['ret'] = opts['return'] log.debug('minion return: %s', ret) # TODO: make a list? Seems odd to split it this late :/ if data['ret'] and isinstance(data['ret'], six.string_types): if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: returner_str = '{0}.returner'.format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string(returner_str) log.error( 'Returner %s could not be loaded: %s', returner_str, returner_err ) except Exception as exc: log.exception( 'The return failed for job %s: %s', data['jid'], exc ) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. 
''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID %s', sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) multifunc_ordered = opts.get('multifunc_ordered', False) num_funcs = len(data['fun']) if multifunc_ordered: ret = { 'return': [None] * num_funcs, 'retcode': [None] * num_funcs, 'success': [False] * num_funcs } else: ret = { 'return': {}, 'retcode': {}, 'success': {} } for ind in range(0, num_funcs): if not multifunc_ordered: ret['success'][data['fun'][ind]] = False try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True elif minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. 
Only ' 'saltutil.refresh_pillar allowed in blackout mode.') func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) minion_instance.functions.pack['__context__']['retcode'] = 0 key = ind if multifunc_ordered else data['fun'][ind] ret['return'][key] = func(*args, **kwargs) retcode = minion_instance.functions.pack['__context__'].get( 'retcode', 0 ) if retcode == 0: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. try: func_result = all(ret['return'][key].get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = 1 ret['retcode'][key] = retcode ret['success'][key] = retcode == 0 except Exception as exc: trb = traceback.format_exc() log.warning('The minion function caused an exception: %s', exc) if multifunc_ordered: ret['return'][ind] = trb else: ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job %s: %s', data['jid'], exc ) def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file 
is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled if ret['jid'] == 'req': ret['jid'] = salt.utils.jid.gen_jid(self.opts) salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) if not self.opts['pub_ret']: return '' def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. 
If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' if not isinstance(rets, list): rets = [rets] jids = {} for ret in rets: jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) load = jids.setdefault(jid, {}) if ret_cmd == '_syndic_return': if not load: load.update({'id': self.opts['id'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__'), 'return': {}}) if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load.update({'id': self.opts['id']}) for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. 
This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled salt.utils.minion.cache_jobs(self.opts, load['jid'], ret) load = {'cmd': ret_cmd, 'load': list(six.itervalues(jids))} def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' 
) else: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data) def _refresh_grains_watcher(self, refresh_interval_in_minutes): ''' Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: :return: None ''' if '__update_grains' not in self.opts.get('schedule', {}): if 'schedule' not in self.opts: self.opts['schedule'] = {} self.opts['schedule'].update({ '__update_grains': { 'function': 'event.fire', 'args': [{}, 'grains_refresh'], 'minutes': refresh_interval_in_minutes } }) def _fire_master_minion_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to False in Sodium release. self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'minion_start' ) # send name spaced event self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'minion'), ) def module_refresh(self, force_refresh=False, notify=False): ''' Refresh the functions and returners. ''' log.debug('Refreshing modules. Notify=%s', notify) self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify) self.schedule.functions = self.functions self.schedule.returners = self.returners def beacons_refresh(self): ''' Refresh the functions and returners. 
''' log.debug('Refreshing beacons.') self.beacons = salt.beacons.Beacon(self.opts, self.functions) def matchers_refresh(self): ''' Refresh the matchers ''' log.debug('Refreshing matchers.') self.matchers = salt.loader.matchers(self.opts) # TODO: only allow one future in flight at a time? @tornado.gen.coroutine def pillar_refresh(self, force_refresh=False, notify=False): ''' Refresh the pillar ''' if self.connected: log.debug('Refreshing pillar. Notify: %s', notify) async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ) try: self.opts['pillar'] = yield async_pillar.compile_pillar() if notify: evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE) except SaltClientError: # Do not exit if a pillar refresh fails. log.error('Pillar data could not be refreshed. ' 'One or more masters may be down!') finally: async_pillar.destroy() self.module_refresh(force_refresh, notify) self.matchers_refresh() self.beacons_refresh() def manage_schedule(self, tag, data): ''' Refresh the functions and returners. 
''' func = data.get('func', None) name = data.get('name', None) schedule = data.get('schedule', None) where = data.get('where', None) persist = data.get('persist', None) funcs = {'delete': ('delete_job', (name, persist)), 'add': ('add_job', (schedule, persist)), 'modify': ('modify_job', (name, schedule, persist)), 'enable': ('enable_schedule', ()), 'disable': ('disable_schedule', ()), 'enable_job': ('enable_job', (name, persist)), 'disable_job': ('disable_job', (name, persist)), 'postpone_job': ('postpone_job', (name, data)), 'skip_job': ('skip_job', (name, data)), 'reload': ('reload', (schedule,)), 'list': ('list', (where,)), 'save_schedule': ('save_schedule', ()), 'get_next_fire_time': ('get_next_fire_time', (name,))} # Call the appropriate schedule function try: alias, params = funcs.get(func) getattr(self.schedule, alias)(*params) except TypeError: log.error('Function "%s" is unavailable in salt.utils.scheduler', func) def manage_beacons(self, tag, data): ''' Manage Beacons ''' func = data.get('func', None) name = data.get('name', None) beacon_data = data.get('beacon_data', None) include_pillar = data.get('include_pillar', None) include_opts = data.get('include_opts', None) funcs = {'add': ('add_beacon', (name, beacon_data)), 'modify': ('modify_beacon', (name, beacon_data)), 'delete': ('delete_beacon', (name,)), 'enable': ('enable_beacons', ()), 'disable': ('disable_beacons', ()), 'enable_beacon': ('enable_beacon', (name,)), 'disable_beacon': ('disable_beacon', (name,)), 'list': ('list_beacons', (include_opts, include_pillar)), 'list_available': ('list_available_beacons', ()), 'validate_beacon': ('validate_beacon', (name, beacon_data)), 'reset': ('reset', ())} # Call the appropriate beacon function try: alias, params = funcs.get(func) getattr(self.beacons, alias)(*params) except AttributeError: log.error('Function "%s" is unavailable in salt.beacons', func) except TypeError as exc: log.info( 'Failed to handle %s with data(%s). 
Error: %s', tag, data, exc, exc_info_on_loglevel=logging.DEBUG ) def environ_setenv(self, tag, data): ''' Set the salt-minion main process environment according to the data contained in the minion event data ''' environ = data.get('environ', None) if environ is None: return False false_unsets = data.get('false_unsets', False) clear_all = data.get('clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all) def _pre_tune(self): ''' Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running ''' if self._running is None: self._running = True elif self._running is False: log.error( 'This %s was scheduled to stop. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return elif self._running is True: log.error( 'This %s is already running. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return try: log.info( '%s is starting as user \'%s\'', self.__class__.__name__, salt.utils.user.get_user() ) except Exception as err: # Only windows is allowed to fail here. See #3189. Log as debug in # that case. Else, error. 
            log.log(
                # windows failures here are expected (#3189), so demote to DEBUG
                salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR,
                'Failed to get the user who is starting %s',
                self.__class__.__name__,
                exc_info=err
            )

    def _mine_send(self, tag, data):
        '''
        Send mine data to the master

        Returns the master's reply, or None when the request times out.
        The request channel is always closed via the finally block.
        '''
        channel = salt.transport.client.ReqChannel.factory(self.opts)
        data['tok'] = self.tok
        try:
            ret = channel.send(data)
            return ret
        except SaltReqTimeoutError:
            log.warning('Unable to send mine data to master.')
            return None
        finally:
            channel.close()

    def _handle_tag_module_refresh(self, tag, data):
        '''
        Handle a module_refresh event
        '''
        self.module_refresh(
            force_refresh=data.get('force_refresh', False),
            notify=data.get('notify', False)
        )

    @tornado.gen.coroutine
    def _handle_tag_pillar_refresh(self, tag, data):
        '''
        Handle a pillar_refresh event
        '''
        yield self.pillar_refresh(
            force_refresh=data.get('force_refresh', False),
            notify=data.get('notify', False)
        )

    def _handle_tag_beacons_refresh(self, tag, data):
        '''
        Handle a beacon_refresh event
        '''
        self.beacons_refresh()

    def _handle_tag_matchers_refresh(self, tag, data):
        '''
        Handle a matchers_refresh event
        '''
        self.matchers_refresh()

    def _handle_tag_manage_schedule(self, tag, data):
        '''
        Handle a manage_schedule event
        '''
        self.manage_schedule(tag, data)

    def _handle_tag_manage_beacons(self, tag, data):
        '''
        Handle a manage_beacons event
        '''
        self.manage_beacons(tag, data)

    def _handle_tag_grains_refresh(self, tag, data):
        '''
        Handle a grains_refresh event

        Refreshes pillar (and caches the new grains) only when forced or
        when the grains actually changed since the last refresh.
        '''
        if (data.get('force_refresh', False) or
                self.grains_cache != self.opts['grains']):
            # NOTE(review): pillar_refresh is a tornado coroutine but is
            # invoked here without yield, so the refresh is fire-and-forget
            # from this handler's point of view — confirm this is intended
            self.pillar_refresh(force_refresh=True)
            self.grains_cache = self.opts['grains']

    def _handle_tag_environ_setenv(self, tag, data):
        '''
        Handle a environ_setenv event
        '''
        self.environ_setenv(tag, data)

    def _handle_tag_minion_mine(self, tag, data):
        '''
        Handle a _minion_mine event
        '''
        self._mine_send(tag, data)

    def _handle_tag_fire_master(self, tag, data):
        '''
        Handle a fire_master event
        '''
        if self.connected:
            log.debug('Forwarding master event tag=%s', data['tag'])
self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) def _handle_tag_master_disconnected_failback(self, tag, data): ''' Handle a master_disconnected_failback event ''' # if the master disconnect event is for a different master, raise an exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 
'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = 
True self.io_loop.stop() def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) @tornado.gen.coroutine def handle_event(self, package): ''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': 
self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. ''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion.handle_event
python
def handle_event(self, package): ''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data)
Handle an event from the epull_sock (all local minion events)
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2646-L2679
[ "def master_event(type, master=None):\n '''\n Centralized master event function which will return event type based on event_map\n '''\n event_map = {'connected': '__master_connected',\n 'disconnected': '__master_disconnected',\n 'failback': '__master_failback',\n 'alive': '__master_alive'}\n\n if type == 'alive' and master is not None:\n return '{0}_{1}'.format(event_map.get(type), master)\n\n return event_map.get(type, None)\n", "def unpack(cls, raw, serial=None):\n if serial is None:\n serial = salt.payload.Serial({'serial': 'msgpack'})\n\n if six.PY2:\n mtag, sep, mdata = raw.partition(TAGEND) # split tag from data\n data = serial.loads(mdata, encoding='utf-8')\n else:\n mtag, sep, mdata = raw.partition(salt.utils.stringutils.to_bytes(TAGEND)) # split tag from data\n mtag = salt.utils.stringutils.to_str(mtag)\n data = serial.loads(mdata, encoding='utf-8')\n return mtag, data\n" ]
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, 
self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' 
mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! 
if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) executors = salt.loader.executors(opts, functions, proxy=proxy) if opt_in: self.opts = opts return functions, returners, errors, executors def _send_req_sync(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.ReqChannel.factory(self.opts) try: return channel.send(load, timeout=timeout) finally: channel.close() @tornado.gen.coroutine def _send_req_async(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.AsyncReqChannel.factory(self.opts) try: ret = yield channel.send(load, timeout=timeout) raise tornado.gen.Return(ret) finally: channel.close() def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. ''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return if sync: try: self._send_req_sync(load, timeout) except salt.exceptions.SaltReqTimeoutError: log.info('fire_master failed: master could not be contacted. 
Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return False except Exception: log.info('fire_master failed: %s', traceback.format_exc()) return False else: if timeout_handler is None: def handle_timeout(*_): log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return True timeout_handler = handle_timeout with tornado.stack_context.ExceptionStackContext(timeout_handler): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # Ensure payload is unicode. Disregard failure to decode binary blobs. if six.PY2: data = salt.utils.data.decode(data, keep=True) if 'user' in data: log.info( 'User %s Executing command %s with jid %s', data['user'], data['fun'], data['jid'] ) else: log.info( 'Executing command %s with jid %s', data['fun'], data['jid'] ) log.debug('Command details %s', data) # Don't duplicate jobs log.trace('Started JIDs: %s', self.jid_queue) if self.jid_queue is not None: if data['jid'] in self.jid_queue: return else: self.jid_queue.append(data['jid']) if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']: self.jid_queue.pop(0) if isinstance(data['fun'], six.string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners process_count_max = self.opts.get('process_count_max') process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs') if process_count_max > 0: process_count = len(salt.utils.minion.running(self.opts)) while 
process_count >= process_count_max: log.warning('Maximum number of processes (%s) reached while ' 'executing jid %s, waiting %s seconds...', process_count_max, data['jid'], process_count_max_sleep_secs) yield tornado.gen.sleep(process_count_max_sleep_secs) process_count = len(salt.utils.minion.running(self.opts)) # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. instance = self multiprocessing_enabled = self.opts.get('multiprocessing', True) if multiprocessing_enabled: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None with default_signals(signal.SIGINT, signal.SIGTERM): process = SignalHandlingMultiprocessingProcess( target=self._target, args=(instance, self.opts, data, self.connected) ) else: process = threading.Thread( target=self._target, args=(instance, self.opts, data, self.connected), name=data['jid'] ) if multiprocessing_enabled: with default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers process.start() else: process.start() # TODO: remove the windows specific check? 
if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading 
target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. 
Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
try: func_result = all(return_data.get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret['retcode'] = retcode ret['success'] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = 'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( 'A command in \'%s\' had a problem: %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( 'Problem executing \'%s\': %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing \'{0}\': {1}'.format( function_name, exc ) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format( function_name, exc, func.__doc__ or '' ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string(function_name) ret['return'] = docs else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = 
function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: \'{0}\''.format( minion_instance.function_errors[mod_name] ) ret['success'] = False ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. Ignoring.') if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) # Add default returners from minion config # Should have been coverted to comma-delimited string already if isinstance(opts.get('return'), six.string_types): if data['ret']: data['ret'] = ','.join((data['ret'], opts['return'])) else: data['ret'] = opts['return'] log.debug('minion return: %s', ret) # TODO: make a list? Seems odd to split it this late :/ if data['ret'] and isinstance(data['ret'], six.string_types): if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: returner_str = '{0}.returner'.format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string(returner_str) log.error( 'Returner %s could not be loaded: %s', returner_str, returner_err ) except Exception as exc: log.exception( 'The return failed for job %s: %s', data['jid'], exc ) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. 
''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID %s', sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) multifunc_ordered = opts.get('multifunc_ordered', False) num_funcs = len(data['fun']) if multifunc_ordered: ret = { 'return': [None] * num_funcs, 'retcode': [None] * num_funcs, 'success': [False] * num_funcs } else: ret = { 'return': {}, 'retcode': {}, 'success': {} } for ind in range(0, num_funcs): if not multifunc_ordered: ret['success'][data['fun'][ind]] = False try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True elif minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. 
Only ' 'saltutil.refresh_pillar allowed in blackout mode.') func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) minion_instance.functions.pack['__context__']['retcode'] = 0 key = ind if multifunc_ordered else data['fun'][ind] ret['return'][key] = func(*args, **kwargs) retcode = minion_instance.functions.pack['__context__'].get( 'retcode', 0 ) if retcode == 0: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. try: func_result = all(ret['return'][key].get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = 1 ret['retcode'][key] = retcode ret['success'][key] = retcode == 0 except Exception as exc: trb = traceback.format_exc() log.warning('The minion function caused an exception: %s', exc) if multifunc_ordered: ret['return'][ind] = trb else: ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job %s: %s', data['jid'], exc ) def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file 
is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled if ret['jid'] == 'req': ret['jid'] = salt.utils.jid.gen_jid(self.opts) salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) if not self.opts['pub_ret']: return '' def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. 
If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' if not isinstance(rets, list): rets = [rets] jids = {} for ret in rets: jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) load = jids.setdefault(jid, {}) if ret_cmd == '_syndic_return': if not load: load.update({'id': self.opts['id'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__'), 'return': {}}) if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load.update({'id': self.opts['id']}) for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. 
This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled salt.utils.minion.cache_jobs(self.opts, load['jid'], ret) load = {'cmd': ret_cmd, 'load': list(six.itervalues(jids))} def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' 
                )
            else:
                # Build a synthetic job payload and dispatch it like a
                # master-published job
                data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
                if self.opts['startup_states'] == 'sls':
                    data['fun'] = 'state.sls'
                    data['arg'] = [self.opts['sls_list']]
                elif self.opts['startup_states'] == 'top':
                    data['fun'] = 'state.top'
                    data['arg'] = [self.opts['top_file']]
                else:
                    data['fun'] = 'state.highstate'
                    data['arg'] = []
                self._handle_decoded_payload(data)

    def _refresh_grains_watcher(self, refresh_interval_in_minutes):
        '''
        Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion

        :param refresh_interval_in_minutes: interval for the ``__update_grains`` scheduled job
        :return: None
        '''
        if '__update_grains' not in self.opts.get('schedule', {}):
            if 'schedule' not in self.opts:
                self.opts['schedule'] = {}
            # Schedule an event.fire of 'grains_refresh' at the given interval
            self.opts['schedule'].update({
                '__update_grains':
                    {
                        'function': 'event.fire',
                        'args': [{}, 'grains_refresh'],
                        'minutes': refresh_interval_in_minutes
                    }
            })

    def _fire_master_minion_start(self):
        '''
        Fire the minion start event(s) on the master event bus.
        '''
        # Send an event to the master that the minion is live
        if self.opts['enable_legacy_startup_events']:
            # Old style event. Defaults to False in Sodium release.
            self._fire_master(
                'Minion {0} started at {1}'.format(
                    self.opts['id'],
                    time.asctime()
                ),
                'minion_start'
            )
        # send name spaced event
        self._fire_master(
            'Minion {0} started at {1}'.format(
                self.opts['id'],
                time.asctime()
            ),
            tagify([self.opts['id'], 'start'], 'minion'),
        )

    def module_refresh(self, force_refresh=False, notify=False):
        '''
        Refresh the functions and returners.
        '''
        log.debug('Refreshing modules. Notify=%s', notify)
        self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify)
        # Keep the scheduler's loader references in sync with the new loaders
        self.schedule.functions = self.functions
        self.schedule.returners = self.returners

    def beacons_refresh(self):
        '''
        Refresh the beacons.
        '''
        log.debug('Refreshing beacons.')
        self.beacons = salt.beacons.Beacon(self.opts, self.functions)

    def matchers_refresh(self):
        '''
        Refresh the matchers
        '''
        log.debug('Refreshing matchers.')
        self.matchers = salt.loader.matchers(self.opts)

    # TODO: only allow one future in flight at a time?
    @tornado.gen.coroutine
    def pillar_refresh(self, force_refresh=False, notify=False):
        '''
        Refresh the pillar

        Recompiles pillar from the master (only when connected) and then
        refreshes modules, matchers and beacons so they see the new data.
        '''
        if self.connected:
            log.debug('Refreshing pillar. Notify: %s', notify)
            async_pillar = salt.pillar.get_async_pillar(
                self.opts,
                self.opts['grains'],
                self.opts['id'],
                self.opts['saltenv'],
                pillarenv=self.opts.get('pillarenv'),
            )
            try:
                self.opts['pillar'] = yield async_pillar.compile_pillar()
                if notify:
                    # Let local listeners know the pillar refresh completed
                    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
                    evt.fire_event({'complete': True},
                                   tag=salt.defaults.events.MINION_PILLAR_COMPLETE)
            except SaltClientError:
                # Do not exit if a pillar refresh fails.
                log.error('Pillar data could not be refreshed. '
                          'One or more masters may be down!')
            finally:
                async_pillar.destroy()
        # Propagate the (possibly unchanged) pillar to dependent subsystems
        self.module_refresh(force_refresh, notify)
        self.matchers_refresh()
        self.beacons_refresh()

    def manage_schedule(self, tag, data):
        '''
        Manage the scheduler: dispatch a schedule operation named in
        ``data['func']`` to the matching salt.utils.schedule method.
        '''
        func = data.get('func', None)
        name = data.get('name', None)
        schedule = data.get('schedule', None)
        where = data.get('where', None)
        persist = data.get('persist', None)

        # Map the requested operation to (schedule method name, args)
        funcs = {'delete': ('delete_job', (name, persist)),
                 'add': ('add_job', (schedule, persist)),
                 'modify': ('modify_job', (name, schedule, persist)),
                 'enable': ('enable_schedule', ()),
                 'disable': ('disable_schedule', ()),
                 'enable_job': ('enable_job', (name, persist)),
                 'disable_job': ('disable_job', (name, persist)),
                 'postpone_job': ('postpone_job', (name, data)),
                 'skip_job': ('skip_job', (name, data)),
                 'reload': ('reload', (schedule,)),
                 'list': ('list', (where,)),
                 'save_schedule': ('save_schedule', ()),
                 'get_next_fire_time': ('get_next_fire_time', (name,))}

        # Call the appropriate schedule function
        try:
            # funcs.get(func) is None for unknown ops; the unpack then
            # raises TypeError, which is logged below
            alias, params = funcs.get(func)
            getattr(self.schedule, alias)(*params)
        except TypeError:
            log.error('Function "%s" is unavailable in salt.utils.scheduler', func)

    def manage_beacons(self, tag, data):
        '''
        Manage Beacons
        '''
        func = data.get('func', None)
        name = data.get('name', None)
        beacon_data = data.get('beacon_data', None)
        include_pillar = data.get('include_pillar', None)
        include_opts = data.get('include_opts', None)

        # Map the requested operation to (Beacon method name, args)
        funcs = {'add': ('add_beacon', (name, beacon_data)),
                 'modify': ('modify_beacon', (name, beacon_data)),
                 'delete': ('delete_beacon', (name,)),
                 'enable': ('enable_beacons', ()),
                 'disable': ('disable_beacons', ()),
                 'enable_beacon': ('enable_beacon', (name,)),
                 'disable_beacon': ('disable_beacon', (name,)),
                 'list': ('list_beacons', (include_opts, include_pillar)),
                 'list_available': ('list_available_beacons', ()),
                 'validate_beacon': ('validate_beacon', (name, beacon_data)),
                 'reset': ('reset', ())}

        # Call the appropriate beacon function
        try:
            alias, params = funcs.get(func)
            getattr(self.beacons, alias)(*params)
        except AttributeError:
            log.error('Function "%s" is unavailable in salt.beacons', func)
        except TypeError as exc:
            log.info(
                'Failed to handle %s with data(%s).
Error: %s', tag, data, exc, exc_info_on_loglevel=logging.DEBUG ) def environ_setenv(self, tag, data): ''' Set the salt-minion main process environment according to the data contained in the minion event data ''' environ = data.get('environ', None) if environ is None: return False false_unsets = data.get('false_unsets', False) clear_all = data.get('clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all) def _pre_tune(self): ''' Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running ''' if self._running is None: self._running = True elif self._running is False: log.error( 'This %s was scheduled to stop. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return elif self._running is True: log.error( 'This %s is already running. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return try: log.info( '%s is starting as user \'%s\'', self.__class__.__name__, salt.utils.user.get_user() ) except Exception as err: # Only windows is allowed to fail here. See #3189. Log as debug in # that case. Else, error. 
        log.log(
            salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR,
            'Failed to get the user who is starting %s',
            self.__class__.__name__,
            exc_info=err
        )

    def _mine_send(self, tag, data):
        '''
        Send mine data to the master
        '''
        channel = salt.transport.client.ReqChannel.factory(self.opts)
        data['tok'] = self.tok
        try:
            ret = channel.send(data)
            return ret
        except SaltReqTimeoutError:
            # Best-effort: mine data loss is logged, not raised
            log.warning('Unable to send mine data to master.')
            return None
        finally:
            channel.close()

    def _handle_tag_module_refresh(self, tag, data):
        '''
        Handle a module_refresh event
        '''
        self.module_refresh(
            force_refresh=data.get('force_refresh', False),
            notify=data.get('notify', False)
        )

    @tornado.gen.coroutine
    def _handle_tag_pillar_refresh(self, tag, data):
        '''
        Handle a pillar_refresh event
        '''
        yield self.pillar_refresh(
            force_refresh=data.get('force_refresh', False),
            notify=data.get('notify', False)
        )

    def _handle_tag_beacons_refresh(self, tag, data):
        '''
        Handle a beacon_refresh event
        '''
        self.beacons_refresh()

    def _handle_tag_matchers_refresh(self, tag, data):
        '''
        Handle a matchers_refresh event
        '''
        self.matchers_refresh()

    def _handle_tag_manage_schedule(self, tag, data):
        '''
        Handle a manage_schedule event
        '''
        self.manage_schedule(tag, data)

    def _handle_tag_manage_beacons(self, tag, data):
        '''
        Handle a manage_beacons event
        '''
        self.manage_beacons(tag, data)

    def _handle_tag_grains_refresh(self, tag, data):
        '''
        Handle a grains_refresh event

        Only triggers a pillar refresh when forced or when the grains
        actually changed since the last cached copy.
        '''
        if (data.get('force_refresh', False) or
                self.grains_cache != self.opts['grains']):
            self.pillar_refresh(force_refresh=True)
            self.grains_cache = self.opts['grains']

    def _handle_tag_environ_setenv(self, tag, data):
        '''
        Handle a environ_setenv event
        '''
        self.environ_setenv(tag, data)

    def _handle_tag_minion_mine(self, tag, data):
        '''
        Handle a _minion_mine event
        '''
        self._mine_send(tag, data)

    def _handle_tag_fire_master(self, tag, data):
        '''
        Handle a fire_master event
        '''
        if self.connected:
            log.debug('Forwarding master event tag=%s', data['tag'])
self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) def _handle_tag_master_disconnected_failback(self, tag, data): ''' Handle a master_disconnected_failback event ''' # if the master disconnect event is for a different master, raise an exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 
'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = 
True self.io_loop.stop() def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) def _handle_tag_salt_auth_creds(self, tag, data): ''' Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def _fallback_cleanups(self): ''' 
        Fallback cleanup routines, attempting to fix leaked processes, threads, etc.
        '''
        # Add an extra fallback in case a forked process leaks through
        multiprocessing.active_children()

        # Cleanup Windows threads
        if not salt.utils.platform.is_windows():
            return
        for thread in self.win_proc:
            if not thread.is_alive():
                thread.join()
                try:
                    self.win_proc.remove(thread)
                    del thread
                except (ValueError, NameError):
                    pass

    def _setup_core(self):
        '''
        Set up the core minion attributes.
        This is safe to call multiple times.
        '''
        if not self.ready:
            # First call. Initialize.
            self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
            self.serial = salt.payload.Serial(self.opts)
            self.mod_opts = self._prep_mod_opts()
            # self.matcher = Matcher(self.opts, self.functions)
            self.matchers = salt.loader.matchers(self.opts)
            self.beacons = salt.beacons.Beacon(self.opts, self.functions)
            uid = salt.utils.user.get_uid(user=self.opts.get('user', None))
            self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
            # Cache the initial grains so grains_refresh can detect changes
            self.grains_cache = self.opts['grains']
            self.ready = True

    def setup_beacons(self, before_connect=False):
        '''
        Set up the beacons.
        This is safe to call multiple times.
        '''
        self._setup_core()

        loop_interval = self.opts['loop_interval']
        new_periodic_callbacks = {}

        if 'beacons' not in self.periodic_callbacks:
            self.beacons = salt.beacons.Beacon(self.opts, self.functions)

            def handle_beacons():
                # Process Beacons
                beacons = None
                try:
                    beacons = self.process_beacons(self.functions)
                except Exception:
                    # A broken beacon must not kill the periodic callback
                    log.critical('The beacon errored: ', exc_info=True)
                if beacons and self.connected:
                    self._fire_master(events=beacons)

            new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(
                handle_beacons, loop_interval * 1000)
            if before_connect:
                # Make sure there is a chance for one iteration to occur before connect
                handle_beacons()

        if 'cleanup' not in self.periodic_callbacks:
            new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(
                self._fallback_cleanups, loop_interval * 1000)

        # start all the other callbacks
        for periodic_cb in six.itervalues(new_periodic_callbacks):
            periodic_cb.start()

        self.periodic_callbacks.update(new_periodic_callbacks)

    def setup_scheduler(self, before_connect=False):
        '''
        Set up the scheduler.
        This is safe to call multiple times.
        '''
        self._setup_core()

        loop_interval = self.opts['loop_interval']
        new_periodic_callbacks = {}

        if 'schedule' not in self.periodic_callbacks:
            if 'schedule' not in self.opts:
                self.opts['schedule'] = {}

            if not hasattr(self, 'schedule'):
                self.schedule = salt.utils.schedule.Schedule(
                    self.opts,
                    self.functions,
                    self.returners,
                    utils=self.utils,
                    cleanup=[master_event(type='alive')])

            try:
                if self.opts['grains_refresh_every']:  # In minutes, not seconds!
                    log.debug(
                        'Enabling the grains refresher.
Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion._fallback_cleanups
python
def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. ''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass
Fallback cleanup routines, attempting to fix leaked processes, threads, etc.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2681-L2698
null
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, 
self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' 
mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! 
if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) executors = salt.loader.executors(opts, functions, proxy=proxy) if opt_in: self.opts = opts return functions, returners, errors, executors def _send_req_sync(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.ReqChannel.factory(self.opts) try: return channel.send(load, timeout=timeout) finally: channel.close() @tornado.gen.coroutine def _send_req_async(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.AsyncReqChannel.factory(self.opts) try: ret = yield channel.send(load, timeout=timeout) raise tornado.gen.Return(ret) finally: channel.close() def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. ''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return if sync: try: self._send_req_sync(load, timeout) except salt.exceptions.SaltReqTimeoutError: log.info('fire_master failed: master could not be contacted. 
Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return False except Exception: log.info('fire_master failed: %s', traceback.format_exc()) return False else: if timeout_handler is None: def handle_timeout(*_): log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return True timeout_handler = handle_timeout with tornado.stack_context.ExceptionStackContext(timeout_handler): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # Ensure payload is unicode. Disregard failure to decode binary blobs. if six.PY2: data = salt.utils.data.decode(data, keep=True) if 'user' in data: log.info( 'User %s Executing command %s with jid %s', data['user'], data['fun'], data['jid'] ) else: log.info( 'Executing command %s with jid %s', data['fun'], data['jid'] ) log.debug('Command details %s', data) # Don't duplicate jobs log.trace('Started JIDs: %s', self.jid_queue) if self.jid_queue is not None: if data['jid'] in self.jid_queue: return else: self.jid_queue.append(data['jid']) if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']: self.jid_queue.pop(0) if isinstance(data['fun'], six.string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners process_count_max = self.opts.get('process_count_max') process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs') if process_count_max > 0: process_count = len(salt.utils.minion.running(self.opts)) while 
process_count >= process_count_max: log.warning('Maximum number of processes (%s) reached while ' 'executing jid %s, waiting %s seconds...', process_count_max, data['jid'], process_count_max_sleep_secs) yield tornado.gen.sleep(process_count_max_sleep_secs) process_count = len(salt.utils.minion.running(self.opts)) # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. instance = self multiprocessing_enabled = self.opts.get('multiprocessing', True) if multiprocessing_enabled: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None with default_signals(signal.SIGINT, signal.SIGTERM): process = SignalHandlingMultiprocessingProcess( target=self._target, args=(instance, self.opts, data, self.connected) ) else: process = threading.Thread( target=self._target, args=(instance, self.opts, data, self.connected), name=data['jid'] ) if multiprocessing_enabled: with default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers process.start() else: process.start() # TODO: remove the windows specific check? 
if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading 
target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. 
Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
try: func_result = all(return_data.get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret['retcode'] = retcode ret['success'] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = 'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( 'A command in \'%s\' had a problem: %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( 'Problem executing \'%s\': %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing \'{0}\': {1}'.format( function_name, exc ) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format( function_name, exc, func.__doc__ or '' ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string(function_name) ret['return'] = docs else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = 
function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: \'{0}\''.format( minion_instance.function_errors[mod_name] ) ret['success'] = False ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. Ignoring.') if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) # Add default returners from minion config # Should have been coverted to comma-delimited string already if isinstance(opts.get('return'), six.string_types): if data['ret']: data['ret'] = ','.join((data['ret'], opts['return'])) else: data['ret'] = opts['return'] log.debug('minion return: %s', ret) # TODO: make a list? Seems odd to split it this late :/ if data['ret'] and isinstance(data['ret'], six.string_types): if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: returner_str = '{0}.returner'.format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string(returner_str) log.error( 'Returner %s could not be loaded: %s', returner_str, returner_err ) except Exception as exc: log.exception( 'The return failed for job %s: %s', data['jid'], exc ) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. 
''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID %s', sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) multifunc_ordered = opts.get('multifunc_ordered', False) num_funcs = len(data['fun']) if multifunc_ordered: ret = { 'return': [None] * num_funcs, 'retcode': [None] * num_funcs, 'success': [False] * num_funcs } else: ret = { 'return': {}, 'retcode': {}, 'success': {} } for ind in range(0, num_funcs): if not multifunc_ordered: ret['success'][data['fun'][ind]] = False try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True elif minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. 
Only ' 'saltutil.refresh_pillar allowed in blackout mode.') func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) minion_instance.functions.pack['__context__']['retcode'] = 0 key = ind if multifunc_ordered else data['fun'][ind] ret['return'][key] = func(*args, **kwargs) retcode = minion_instance.functions.pack['__context__'].get( 'retcode', 0 ) if retcode == 0: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. try: func_result = all(ret['return'][key].get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = 1 ret['retcode'][key] = retcode ret['success'][key] = retcode == 0 except Exception as exc: trb = traceback.format_exc() log.warning('The minion function caused an exception: %s', exc) if multifunc_ordered: ret['return'][ind] = trb else: ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job %s: %s', data['jid'], exc ) def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file 
is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled if ret['jid'] == 'req': ret['jid'] = salt.utils.jid.gen_jid(self.opts) salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) if not self.opts['pub_ret']: return '' def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. 
If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' if not isinstance(rets, list): rets = [rets] jids = {} for ret in rets: jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) load = jids.setdefault(jid, {}) if ret_cmd == '_syndic_return': if not load: load.update({'id': self.opts['id'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__'), 'return': {}}) if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load.update({'id': self.opts['id']}) for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. 
This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled salt.utils.minion.cache_jobs(self.opts, load['jid'], ret) load = {'cmd': ret_cmd, 'load': list(six.itervalues(jids))} def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' 
) else: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data) def _refresh_grains_watcher(self, refresh_interval_in_minutes): ''' Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: :return: None ''' if '__update_grains' not in self.opts.get('schedule', {}): if 'schedule' not in self.opts: self.opts['schedule'] = {} self.opts['schedule'].update({ '__update_grains': { 'function': 'event.fire', 'args': [{}, 'grains_refresh'], 'minutes': refresh_interval_in_minutes } }) def _fire_master_minion_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to False in Sodium release. self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'minion_start' ) # send name spaced event self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'minion'), ) def module_refresh(self, force_refresh=False, notify=False): ''' Refresh the functions and returners. ''' log.debug('Refreshing modules. Notify=%s', notify) self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify) self.schedule.functions = self.functions self.schedule.returners = self.returners def beacons_refresh(self): ''' Refresh the functions and returners. 
''' log.debug('Refreshing beacons.') self.beacons = salt.beacons.Beacon(self.opts, self.functions) def matchers_refresh(self): ''' Refresh the matchers ''' log.debug('Refreshing matchers.') self.matchers = salt.loader.matchers(self.opts) # TODO: only allow one future in flight at a time? @tornado.gen.coroutine def pillar_refresh(self, force_refresh=False, notify=False): ''' Refresh the pillar ''' if self.connected: log.debug('Refreshing pillar. Notify: %s', notify) async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ) try: self.opts['pillar'] = yield async_pillar.compile_pillar() if notify: evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE) except SaltClientError: # Do not exit if a pillar refresh fails. log.error('Pillar data could not be refreshed. ' 'One or more masters may be down!') finally: async_pillar.destroy() self.module_refresh(force_refresh, notify) self.matchers_refresh() self.beacons_refresh() def manage_schedule(self, tag, data): ''' Refresh the functions and returners. 
''' func = data.get('func', None) name = data.get('name', None) schedule = data.get('schedule', None) where = data.get('where', None) persist = data.get('persist', None) funcs = {'delete': ('delete_job', (name, persist)), 'add': ('add_job', (schedule, persist)), 'modify': ('modify_job', (name, schedule, persist)), 'enable': ('enable_schedule', ()), 'disable': ('disable_schedule', ()), 'enable_job': ('enable_job', (name, persist)), 'disable_job': ('disable_job', (name, persist)), 'postpone_job': ('postpone_job', (name, data)), 'skip_job': ('skip_job', (name, data)), 'reload': ('reload', (schedule,)), 'list': ('list', (where,)), 'save_schedule': ('save_schedule', ()), 'get_next_fire_time': ('get_next_fire_time', (name,))} # Call the appropriate schedule function try: alias, params = funcs.get(func) getattr(self.schedule, alias)(*params) except TypeError: log.error('Function "%s" is unavailable in salt.utils.scheduler', func) def manage_beacons(self, tag, data): ''' Manage Beacons ''' func = data.get('func', None) name = data.get('name', None) beacon_data = data.get('beacon_data', None) include_pillar = data.get('include_pillar', None) include_opts = data.get('include_opts', None) funcs = {'add': ('add_beacon', (name, beacon_data)), 'modify': ('modify_beacon', (name, beacon_data)), 'delete': ('delete_beacon', (name,)), 'enable': ('enable_beacons', ()), 'disable': ('disable_beacons', ()), 'enable_beacon': ('enable_beacon', (name,)), 'disable_beacon': ('disable_beacon', (name,)), 'list': ('list_beacons', (include_opts, include_pillar)), 'list_available': ('list_available_beacons', ()), 'validate_beacon': ('validate_beacon', (name, beacon_data)), 'reset': ('reset', ())} # Call the appropriate beacon function try: alias, params = funcs.get(func) getattr(self.beacons, alias)(*params) except AttributeError: log.error('Function "%s" is unavailable in salt.beacons', func) except TypeError as exc: log.info( 'Failed to handle %s with data(%s). 
Error: %s', tag, data, exc, exc_info_on_loglevel=logging.DEBUG ) def environ_setenv(self, tag, data): ''' Set the salt-minion main process environment according to the data contained in the minion event data ''' environ = data.get('environ', None) if environ is None: return False false_unsets = data.get('false_unsets', False) clear_all = data.get('clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all) def _pre_tune(self): ''' Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running ''' if self._running is None: self._running = True elif self._running is False: log.error( 'This %s was scheduled to stop. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return elif self._running is True: log.error( 'This %s is already running. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return try: log.info( '%s is starting as user \'%s\'', self.__class__.__name__, salt.utils.user.get_user() ) except Exception as err: # Only windows is allowed to fail here. See #3189. Log as debug in # that case. Else, error. 
log.log( salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR, 'Failed to get the user who is starting %s', self.__class__.__name__, exc_info=err ) def _mine_send(self, tag, data): ''' Send mine data to the master ''' channel = salt.transport.client.ReqChannel.factory(self.opts) data['tok'] = self.tok try: ret = channel.send(data) return ret except SaltReqTimeoutError: log.warning('Unable to send mine data to master.') return None finally: channel.close() def _handle_tag_module_refresh(self, tag, data): ''' Handle a module_refresh event ''' self.module_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) @tornado.gen.coroutine def _handle_tag_pillar_refresh(self, tag, data): ''' Handle a pillar_refresh event ''' yield self.pillar_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) def _handle_tag_beacons_refresh(self, tag, data): ''' Handle a beacon_refresh event ''' self.beacons_refresh() def _handle_tag_matchers_refresh(self, tag, data): ''' Handle a matchers_refresh event ''' self.matchers_refresh() def _handle_tag_manage_schedule(self, tag, data): ''' Handle a manage_schedule event ''' self.manage_schedule(tag, data) def _handle_tag_manage_beacons(self, tag, data): ''' Handle a manage_beacons event ''' self.manage_beacons(tag, data) def _handle_tag_grains_refresh(self, tag, data): ''' Handle a grains_refresh event ''' if (data.get('force_refresh', False) or self.grains_cache != self.opts['grains']): self.pillar_refresh(force_refresh=True) self.grains_cache = self.opts['grains'] def _handle_tag_environ_setenv(self, tag, data): ''' Handle a environ_setenv event ''' self.environ_setenv(tag, data) def _handle_tag_minion_mine(self, tag, data): ''' Handle a _minion_mine event ''' self._mine_send(tag, data) def _handle_tag_fire_master(self, tag, data): ''' Handle a fire_master event ''' if self.connected: log.debug('Forwarding master event tag=%s', data['tag']) 
self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) def _handle_tag_master_disconnected_failback(self, tag, data): ''' Handle a master_disconnected_failback event ''' # if the master disconnect event is for a different master, raise an exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 
'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = 
True self.io_loop.stop() def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) def _handle_tag_salt_auth_creds(self, tag, data): ''' Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def handle_event(self, package): 
''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion._setup_core
python
def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True
Set up the core minion attributes. This is safe to call multiple times.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2700-L2716
[ "def get_proc_dir(cachedir, **kwargs):\n '''\n Given the cache directory, return the directory that process data is\n stored in, creating it if it doesn't exist.\n The following optional Keyword Arguments are handled:\n\n mode: which is anything os.makedir would accept as mode.\n\n uid: the uid to set, if not set, or it is None or -1 no changes are\n made. Same applies if the directory is already owned by this\n uid. Must be int. Works only on unix/unix like systems.\n\n gid: the gid to set, if not set, or it is None or -1 no changes are\n made. Same applies if the directory is already owned by this\n gid. Must be int. Works only on unix/unix like systems.\n '''\n fn_ = os.path.join(cachedir, 'proc')\n mode = kwargs.pop('mode', None)\n\n if mode is None:\n mode = {}\n else:\n mode = {'mode': mode}\n\n if not os.path.isdir(fn_):\n # proc_dir is not present, create it with mode settings\n os.makedirs(fn_, **mode)\n\n d_stat = os.stat(fn_)\n\n # if mode is not an empty dict then we have an explicit\n # dir mode. 
So lets check if mode needs to be changed.\n if mode:\n mode_part = S_IMODE(d_stat.st_mode)\n if mode_part != mode['mode']:\n os.chmod(fn_, (d_stat.st_mode ^ mode_part) | mode['mode'])\n\n if hasattr(os, 'chown'):\n # only on unix/unix like systems\n uid = kwargs.pop('uid', -1)\n gid = kwargs.pop('gid', -1)\n\n # if uid and gid are both -1 then go ahead with\n # no changes at all\n if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \\\n [i for i in (uid, gid) if i != -1]:\n os.chown(fn_, uid, gid)\n\n return fn_\n", "def _prep_mod_opts(self):\n '''\n Returns a copy of the opts with key bits stripped out\n '''\n mod_opts = {}\n for key, val in six.iteritems(self.opts):\n if key == 'logger':\n continue\n mod_opts[key] = val\n return mod_opts\n", "def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None):\n '''\n Return the functions and the returners loaded up from the loader\n module\n '''\n opt_in = True\n if not opts:\n opts = self.opts\n opt_in = False\n # if this is a *nix system AND modules_max_memory is set, lets enforce\n # a memory limit on module imports\n # this feature ONLY works on *nix like OSs (resource module doesn't work on windows)\n modules_max_memory = False\n if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:\n log.debug(\n 'modules_max_memory set, enforcing a maximum of %s',\n opts['modules_max_memory']\n )\n modules_max_memory = True\n old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)\n rss, vms = psutil.Process(os.getpid()).memory_info()[:2]\n mem_limit = rss + vms + opts['modules_max_memory']\n resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))\n elif opts.get('modules_max_memory', -1) > 0:\n if not HAS_PSUTIL:\n log.error('Unable to enforce modules_max_memory because psutil is missing')\n if not HAS_RESOURCE:\n log.error('Unable to enforce modules_max_memory because resource is missing')\n\n # This might be a proxy minion\n if hasattr(self, 'proxy'):\n proxy = self.proxy\n 
else:\n proxy = None\n\n if grains is None:\n opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy)\n self.utils = salt.loader.utils(opts, proxy=proxy)\n\n if opts.get('multimaster', False):\n s_opts = copy.deepcopy(opts)\n functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy,\n loaded_base_name=self.loaded_base_name, notify=notify)\n else:\n functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy)\n returners = salt.loader.returners(opts, functions, proxy=proxy)\n errors = {}\n if '_errors' in functions:\n errors = functions['_errors']\n functions.pop('_errors')\n\n # we're done, reset the limits!\n if modules_max_memory is True:\n resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)\n\n executors = salt.loader.executors(opts, functions, proxy=proxy)\n\n if opt_in:\n self.opts = opts\n\n return functions, returners, errors, executors\n" ]
class Minion(MinionBase):
    '''
    This class instantiates a minion, runs connections for a minion,
    and loads all of the functions into the minion
    '''
    def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None):  # pylint: disable=W0231
        '''
        Pass in the options dict
        '''
        # this means that the parent class doesn't know *which* master we connect to
        super(Minion, self).__init__(opts)
        self.timeout = timeout
        self.safe = safe

        self._running = None
        self.win_proc = []
        self.loaded_base_name = loaded_base_name
        self.connected = False
        self.restart = False
        # Flag meaning minion has finished initialization including first connect to the master.
        # True means the Minion is fully functional and ready to handle events.
        self.ready = False
        self.jid_queue = [] if jid_queue is None else jid_queue
        self.periodic_callbacks = {}

        if io_loop is None:
            install_zmq()
            self.io_loop = ZMQDefaultLoop.current()
        else:
            self.io_loop = io_loop

        # Warn if ZMQ < 3.2
        if zmq:
            if ZMQ_VERSION_INFO < (3, 2):
                log.warning(
                    'You have a version of ZMQ less than ZMQ 3.2! There are '
                    'known connection keep-alive issues with ZMQ < 3.2 which '
                    'may result in loss of contact with minions. Please '
                    'upgrade your ZMQ!'
                )
        # Late setup of the opts grains, so we can log from the grains
        # module. If this is a proxy, however, we need to init the proxymodule
        # before we can get the grains. We do this for proxies in the
        # post_master_init
        if not salt.utils.platform.is_proxy():
            self.opts['grains'] = salt.loader.grains(opts)
        else:
            if self.opts.get('beacons_before_connect', False):
                log.warning(
                    '\'beacons_before_connect\' is not supported '
                    'for proxy minions. Setting to False'
                )
                self.opts['beacons_before_connect'] = False
            if self.opts.get('scheduler_before_connect', False):
                log.warning(
                    '\'scheduler_before_connect\' is not supported '
                    'for proxy minions. Setting to False'
                )
                self.opts['scheduler_before_connect'] = False

        log.info('Creating minion process manager')

        if self.opts['random_startup_delay']:
            sleep_time = random.randint(0, self.opts['random_startup_delay'])
            log.info(
                'Minion sleeping for %s seconds due to configured '
                'startup_delay between 0 and %s seconds',
                sleep_time, self.opts['random_startup_delay']
            )
            time.sleep(sleep_time)

        self.process_manager = ProcessManager(name='MinionProcessManager')
        self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True})
        # We don't have the proxy setup yet, so we can't start engines
        # Engines need to be able to access __proxy__
        if not salt.utils.platform.is_proxy():
            self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
                                        self.process_manager)

        # Install the SIGINT/SIGTERM handlers if not done so far
        if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
            # No custom signal handling was added, install our own
            signal.signal(signal.SIGINT, self._handle_signals)

        if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
            # No custom signal handling was added, install our own
            signal.signal(signal.SIGTERM, self._handle_signals)

    def _handle_signals(self, signum, sigframe):  # pylint: disable=unused-argument
        # Stop the running flag, then shut down all child processes before exiting.
        self._running = False
        # escalate the signals to the process manager
        self.process_manager.stop_restarting()
        self.process_manager.send_signal_to_processes(signum)
        # kill any remaining processes
        self.process_manager.kill_children()
        time.sleep(1)
        sys.exit(0)

    def sync_connect_master(self, timeout=None, failed=False):
        '''
        Block until we are connected to a master
        '''
        self._sync_connect_master_success = False
        log.debug("sync_connect_master")

        def on_connect_master_future_done(future):
            self._sync_connect_master_success = True
            self.io_loop.stop()

        self._connect_master_future = self.connect_master(failed=failed)
        # finish connecting to master
        self._connect_master_future.add_done_callback(on_connect_master_future_done)
        if timeout:
            self.io_loop.call_later(timeout, self.io_loop.stop)
        try:
            self.io_loop.start()
        except KeyboardInterrupt:
            self.destroy()
        # I made the following 3 line oddity to preserve traceback.
        # Please read PR #23978 before changing, hopefully avoiding regressions.
        # Good luck, we're all counting on you. Thanks.
        if self._connect_master_future.done():
            future_exception = self._connect_master_future.exception()
            if future_exception:
                # This needs to be re-raised to preserve restart_on_error behavior.
                raise six.reraise(*future_exception)
        if timeout and self._sync_connect_master_success is False:
            raise SaltDaemonNotRunning('Failed to connect to the salt-master')

    @tornado.gen.coroutine
    def connect_master(self, failed=False):
        '''
        Return a future which will complete when you are connected to a master
        '''
        master, self.pub_channel = yield self.eval_master(self.opts,
                                                          self.timeout,
                                                          self.safe,
                                                          failed)
        yield self._post_master_init(master)

    # TODO: better name...
    @tornado.gen.coroutine
    def _post_master_init(self, master):
        '''
        Function to finish init after connecting to a master

        This is primarily loading modules, pillars, etc. (since they need
        to know which master they connected to)

        If this function is changed, please check ProxyMinion._post_master_init
        to see if those changes need to be propagated.

        Minions and ProxyMinions need significantly different post master setups,
        which is why the differences are not factored out into separate helper
        functions.
        '''
        if self.connected:
            self.opts['master'] = master

            # Initialize pillar before loader to make pillar accessible in modules
            async_pillar = salt.pillar.get_async_pillar(
                self.opts,
                self.opts['grains'],
                self.opts['id'],
                self.opts['saltenv'],
                pillarenv=self.opts.get('pillarenv')
            )
            self.opts['pillar'] = yield async_pillar.compile_pillar()
            async_pillar.destroy()

        if not self.ready:
            self._setup_core()
        elif self.connected and self.opts['pillar']:
            # The pillar has changed due to the connection to the master.
            # Reload the functions so that they can use the new pillar data.
            self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
            if hasattr(self, 'schedule'):
                self.schedule.functions = self.functions
                self.schedule.returners = self.returners

        if not hasattr(self, 'schedule'):
            self.schedule = salt.utils.schedule.Schedule(
                self.opts,
                self.functions,
                self.returners,
                cleanup=[master_event(type='alive')])

        # add default scheduling jobs to the minions scheduler
        if self.opts['mine_enabled'] and 'mine.update' in self.functions:
            self.schedule.add_job({
                '__mine_interval':
                {
                    'function': 'mine.update',
                    'minutes': self.opts['mine_interval'],
                    'jid_include': True,
                    'maxrunning': 2,
                    'run_on_start': True,
                    'return_job': self.opts.get('mine_return_job', False)
                }
            }, persist=True)
            log.info('Added mine.update to scheduler')
        else:
            self.schedule.delete_job('__mine_interval', persist=True)

        # add master_alive job if enabled
        if (self.opts['transport'] != 'tcp' and
                self.opts['master_alive_interval'] > 0 and
                self.connected):
            self.schedule.add_job({
                master_event(type='alive', master=self.opts['master']):
                {
                    'function': 'status.master',
                    'seconds': self.opts['master_alive_interval'],
                    'jid_include': True,
                    'maxrunning': 1,
                    'return_job': False,
                    'kwargs': {'master': self.opts['master'],
                               'connected': True}
                }
            }, persist=True)
            if self.opts['master_failback'] and \
                    'master_list' in self.opts and \
                    self.opts['master'] != self.opts['master_list'][0]:
                self.schedule.add_job({
                    master_event(type='failback'):
                    {
                        'function': 'status.ping_master',
                        'seconds': self.opts['master_failback_interval'],
                        'jid_include': True,
                        'maxrunning': 1,
                        'return_job': False,
                        'kwargs': {'master': self.opts['master_list'][0]}
                    }
                }, persist=True)
            else:
                self.schedule.delete_job(master_event(type='failback'), persist=True)
        else:
            self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
            self.schedule.delete_job(master_event(type='failback'), persist=True)

    def _prep_mod_opts(self):
        '''
        Returns a copy of the opts with key bits stripped out
        '''
        mod_opts = {}
        for key, val in six.iteritems(self.opts):
            if key == 'logger':
                continue
            mod_opts[key] = val
        return mod_opts

    def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None):
        '''
        Return the functions and the returners loaded up from the loader
        module
        '''
        opt_in = True
        if not opts:
            opts = self.opts
            opt_in = False
        # if this is a *nix system AND modules_max_memory is set, lets enforce
        # a memory limit on module imports
        # this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
        modules_max_memory = False
        if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
            log.debug(
                'modules_max_memory set, enforcing a maximum of %s',
                opts['modules_max_memory']
            )
            modules_max_memory = True
            old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
            rss, vms = psutil.Process(os.getpid()).memory_info()[:2]
            mem_limit = rss + vms + opts['modules_max_memory']
            resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
        elif opts.get('modules_max_memory', -1) > 0:
            if not HAS_PSUTIL:
                log.error('Unable to enforce modules_max_memory because psutil is missing')
            if not HAS_RESOURCE:
                log.error('Unable to enforce modules_max_memory because resource is missing')

        # This might be a proxy minion
        if hasattr(self, 'proxy'):
            proxy = self.proxy
        else:
            proxy = None

        if grains is None:
            opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy)
        self.utils = salt.loader.utils(opts, proxy=proxy)

        if opts.get('multimaster', False):
            s_opts = copy.deepcopy(opts)
            functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy,
                                                loaded_base_name=self.loaded_base_name, notify=notify)
        else:
            functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy)
        returners = salt.loader.returners(opts, functions, proxy=proxy)
        errors = {}
        if '_errors' in functions:
            errors = functions['_errors']
            functions.pop('_errors')

        # we're done, reset the limits!
        if modules_max_memory is True:
            resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)

        executors = salt.loader.executors(opts, functions, proxy=proxy)

        if opt_in:
            self.opts = opts

        return functions, returners, errors, executors

    def _send_req_sync(self, load, timeout):
        # Optionally sign the payload, then send it over a synchronous req channel.
        if self.opts['minion_sign_messages']:
            log.trace('Signing event to be published onto the bus.')
            minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
            sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
            load['sig'] = sig

        channel = salt.transport.client.ReqChannel.factory(self.opts)
        try:
            return channel.send(load, timeout=timeout)
        finally:
            channel.close()

    @tornado.gen.coroutine
    def _send_req_async(self, load, timeout):
        # Async twin of _send_req_sync: same signing, async req channel.
        if self.opts['minion_sign_messages']:
            log.trace('Signing event to be published onto the bus.')
            minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
            sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
            load['sig'] = sig

        channel = salt.transport.client.AsyncReqChannel.factory(self.opts)
        try:
            ret = yield channel.send(load, timeout=timeout)
            raise tornado.gen.Return(ret)
        finally:
            channel.close()

    def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None):
        '''
        Fire an event on the master, or drop message if unable to send.
        '''
        load = {'id': self.opts['id'],
                'cmd': '_minion_event',
                'pretag': pretag,
                'tok': self.tok}
        if events:
            load['events'] = events
        elif data and tag:
            load['data'] = data
            load['tag'] = tag
        elif not data and tag:
            load['data'] = {}
            load['tag'] = tag
        else:
            # Nothing meaningful to send
            return

        if sync:
            try:
                self._send_req_sync(load, timeout)
            except salt.exceptions.SaltReqTimeoutError:
                log.info('fire_master failed: master could not be contacted. Request timed out.')
                # very likely one of the masters is dead, status.master will flush it
                self.functions['status.master'](self.opts['master'])
                return False
            except Exception:
                log.info('fire_master failed: %s', traceback.format_exc())
                return False
        else:
            if timeout_handler is None:
                def handle_timeout(*_):
                    log.info('fire_master failed: master could not be contacted. Request timed out.')
                    # very likely one of the masters is dead, status.master will flush it
                    self.functions['status.master'](self.opts['master'])
                    return True
                timeout_handler = handle_timeout

            with tornado.stack_context.ExceptionStackContext(timeout_handler):
                self._send_req_async(load, timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg
        return True

    @tornado.gen.coroutine
    def _handle_decoded_payload(self, data):
        '''
        Override this method if you wish to handle the decoded data
        differently.
        '''
        # Ensure payload is unicode. Disregard failure to decode binary blobs.
        if six.PY2:
            data = salt.utils.data.decode(data, keep=True)
        if 'user' in data:
            log.info(
                'User %s Executing command %s with jid %s',
                data['user'], data['fun'], data['jid']
            )
        else:
            log.info(
                'Executing command %s with jid %s',
                data['fun'], data['jid']
            )
        log.debug('Command details %s', data)

        # Don't duplicate jobs
        log.trace('Started JIDs: %s', self.jid_queue)
        if self.jid_queue is not None:
            if data['jid'] in self.jid_queue:
                return
            else:
                self.jid_queue.append(data['jid'])
                if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']:
                    self.jid_queue.pop(0)

        if isinstance(data['fun'], six.string_types):
            if data['fun'] == 'sys.reload_modules':
                self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
                self.schedule.functions = self.functions
                self.schedule.returners = self.returners

        process_count_max = self.opts.get('process_count_max')
        process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs')
        if process_count_max > 0:
            process_count = len(salt.utils.minion.running(self.opts))
            while process_count >= process_count_max:
                log.warning('Maximum number of processes (%s) reached while '
                            'executing jid %s, waiting %s seconds...',
                            process_count_max,
                            data['jid'],
                            process_count_max_sleep_secs)
                yield tornado.gen.sleep(process_count_max_sleep_secs)
                process_count = len(salt.utils.minion.running(self.opts))

        # We stash an instance references to allow for the socket
        # communication in Windows. You can't pickle functions, and thus
        # python needs to be able to reconstruct the reference on the other
        # side.
        instance = self
        multiprocessing_enabled = self.opts.get('multiprocessing', True)
        if multiprocessing_enabled:
            if sys.platform.startswith('win'):
                # let python reconstruct the minion on the other side if we're
                # running on windows
                instance = None
            with default_signals(signal.SIGINT, signal.SIGTERM):
                process = SignalHandlingMultiprocessingProcess(
                    target=self._target, args=(instance, self.opts, data, self.connected)
                )
        else:
            process = threading.Thread(
                target=self._target,
                args=(instance, self.opts, data, self.connected),
                name=data['jid']
            )

        if multiprocessing_enabled:
            with default_signals(signal.SIGINT, signal.SIGTERM):
                # Reset current signals before starting the process in
                # order not to inherit the current signal handlers
                process.start()
        else:
            process.start()

        # TODO: remove the windows specific check?
        if multiprocessing_enabled and not salt.utils.platform.is_windows():
            # we only want to join() immediately if we are daemonizing a process
            process.join()
        elif salt.utils.platform.is_windows():
            self.win_proc.append(process)

    def ctx(self):
        '''
        Return a single context manager for the minion's data
        '''
        if six.PY2:
            return contextlib.nested(
                self.functions.context_dict.clone(),
                self.returners.context_dict.clone(),
                self.executors.context_dict.clone(),
            )
        else:
            exitstack = contextlib.ExitStack()
            exitstack.enter_context(self.functions.context_dict.clone())
            exitstack.enter_context(self.returners.context_dict.clone())
            exitstack.enter_context(self.executors.context_dict.clone())
            return exitstack

    @classmethod
    def _target(cls, minion_instance, opts, data, connected):
        # Entry point run in the job process/thread; rebuilds the minion
        # when it could not be pickled across (e.g. on Windows).
        if not minion_instance:
            minion_instance = cls(opts)
            minion_instance.connected = connected
            if not hasattr(minion_instance, 'functions'):
                functions, returners, function_errors, executors = (
                    minion_instance._load_modules(grains=opts['grains'])
                    )
                minion_instance.functions = functions
                minion_instance.returners = returners
                minion_instance.function_errors = function_errors
                minion_instance.executors = executors
            if not hasattr(minion_instance, 'serial'):
                minion_instance.serial = salt.payload.Serial(opts)
            if not hasattr(minion_instance, 'proc_dir'):
                uid = salt.utils.user.get_uid(user=opts.get('user', None))
                minion_instance.proc_dir = (
                    get_proc_dir(opts['cachedir'], uid=uid)
                    )

        def run_func(minion_instance, opts, data):
            # Dispatch single vs. multi-function jobs.
            if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
                return Minion._thread_multi_return(minion_instance, opts, data)
            else:
                return Minion._thread_return(minion_instance, opts, data)

        with tornado.stack_context.StackContext(functools.partial(RequestContext,
                                                                  {'data': data, 'opts': opts})):
            with tornado.stack_context.StackContext(minion_instance.ctx):
                run_func(minion_instance, opts, data)

    @classmethod
    def _thread_return(cls, minion_instance, opts, data):
        '''
        This method should be used as a threading target, start the actual
        minion side execution.
        '''
        fn_ = os.path.join(minion_instance.proc_dir, data['jid'])

        if opts['multiprocessing'] and not salt.utils.platform.is_windows():
            # Shutdown the multiprocessing before daemonizing
            salt.log.setup.shutdown_multiprocessing_logging()

            salt.utils.process.daemonize_if(opts)

            # Reconfigure multiprocessing logging after daemonizing
            salt.log.setup.setup_multiprocessing_logging()

        salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid']))

        sdata = {'pid': os.getpid()}
        sdata.update(data)
        log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid'])
        with salt.utils.files.fopen(fn_, 'w+b') as fp_:
            fp_.write(minion_instance.serial.dumps(sdata))
        ret = {'success': False}
        function_name = data['fun']
        executors = data.get('module_executors') or \
            getattr(minion_instance, 'module_executors', []) or \
            opts.get('module_executors', ['direct_call'])
        # NOTE(review): the membership test below checks the literal string
        # '{0}.allow_missing_func' (never .format()ed with the executor name),
        # so it can never match — this looks like a bug; confirm intent.
        allow_missing_funcs = any([
            minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name)
            for executor in executors
            if '{0}.allow_missing_func' in minion_instance.executors
        ])
        if function_name in minion_instance.functions or allow_missing_funcs is True:
            try:
                minion_blackout_violation = False
                if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
                    whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
                    # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                    if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
                        minion_blackout_violation = True
                # use minion_blackout_whitelist from grains if it exists
                if minion_instance.opts['grains'].get('minion_blackout', False):
                    whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
                    if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
                        minion_blackout_violation = True
                if minion_blackout_violation:
                    raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                              'to False in pillar or grains to resume operations. Only '
                                              'saltutil.refresh_pillar allowed in blackout mode.')

                if function_name in minion_instance.functions:
                    func = minion_instance.functions[function_name]
                    args, kwargs = load_args_and_kwargs(
                        func,
                        data['arg'],
                        data)
                else:
                    # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True
                    func = function_name
                    args, kwargs = data['arg'], data
                minion_instance.functions.pack['__context__']['retcode'] = 0
                if isinstance(executors, six.string_types):
                    executors = [executors]
                elif not isinstance(executors, list) or not executors:
                    raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected".
                                              format(executors))
                if opts.get('sudo_user', '') and executors[-1] != 'sudo':
                    executors[-1] = 'sudo'  # replace the last one with sudo
                log.trace('Executors list %s', executors)  # pylint: disable=no-member

                for name in executors:
                    fname = '{0}.execute'.format(name)
                    if fname not in minion_instance.executors:
                        raise SaltInvocationError("Executor '{0}' is not available".format(name))
                    return_data = minion_instance.executors[fname](opts, data, func, args, kwargs)
                    if return_data is not None:
                        break

                if isinstance(return_data, types.GeneratorType):
                    ind = 0
                    iret = {}
                    for single in return_data:
                        if isinstance(single, dict) and isinstance(iret, dict):
                            iret.update(single)
                        else:
                            if not iret:
                                iret = []
                            iret.append(single)
                        tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job')
                        event_data = {'return': single}
                        minion_instance._fire_master(event_data, tag)
                        ind += 1
                    ret['return'] = iret
                else:
                    ret['return'] = return_data

                retcode = minion_instance.functions.pack['__context__'].get(
                    'retcode',
                    salt.defaults.exitcodes.EX_OK
                )
                if retcode == salt.defaults.exitcodes.EX_OK:
                    # No nonzero retcode in __context__ dunder. Check if return
                    # is a dictionary with a "result" or "success" key.
                    try:
                        func_result = all(return_data.get(x, True)
                                          for x in ('result', 'success'))
                    except Exception:
                        # return data is not a dict
                        func_result = True
                    if not func_result:
                        retcode = salt.defaults.exitcodes.EX_GENERIC

                ret['retcode'] = retcode
                ret['success'] = retcode == salt.defaults.exitcodes.EX_OK
            except CommandNotFoundError as exc:
                msg = 'Command required for \'{0}\' not found'.format(
                    function_name
                )
                log.debug(msg, exc_info=True)
                ret['return'] = '{0}: {1}'.format(msg, exc)
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            except CommandExecutionError as exc:
                log.error(
                    'A command in \'%s\' had a problem: %s',
                    function_name, exc,
                    exc_info_on_loglevel=logging.DEBUG
                )
                ret['return'] = 'ERROR: {0}'.format(exc)
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            except SaltInvocationError as exc:
                log.error(
                    'Problem executing \'%s\': %s',
                    function_name, exc,
                    exc_info_on_loglevel=logging.DEBUG
                )
                ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
                    function_name, exc
                )
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            except TypeError as exc:
                msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(
                    function_name, exc, func.__doc__ or ''
                )
                log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
                ret['return'] = msg
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            except Exception:
                msg = 'The minion function caused an exception'
                log.warning(msg, exc_info_on_loglevel=True)
                salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
                ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        else:
            docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name))
            if docs:
                docs[function_name] = minion_instance.functions.missing_fun_string(function_name)
                ret['return'] = docs
            else:
                ret['return'] = minion_instance.functions.missing_fun_string(function_name)
                mod_name = function_name.split('.')[0]
                if mod_name in minion_instance.function_errors:
                    ret['return'] += ' Possible reasons: \'{0}\''.format(
                        minion_instance.function_errors[mod_name]
                    )
            ret['success'] = False
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            ret['out'] = 'nested'

        ret['jid'] = data['jid']
        ret['fun'] = data['fun']
        ret['fun_args'] = data['arg']
        if 'master_id' in data:
            ret['master_id'] = data['master_id']
        if 'metadata' in data:
            if isinstance(data['metadata'], dict):
                ret['metadata'] = data['metadata']
            else:
                log.warning('The metadata parameter must be a dictionary. Ignoring.')
        if minion_instance.connected:
            minion_instance._return_pub(
                ret,
                timeout=minion_instance._return_retry_timer()
            )

        # Add default returners from minion config
        # Should have been coverted to comma-delimited string already
        if isinstance(opts.get('return'), six.string_types):
            if data['ret']:
                data['ret'] = ','.join((data['ret'], opts['return']))
            else:
                data['ret'] = opts['return']

        log.debug('minion return: %s', ret)
        # TODO: make a list? Seems odd to split it this late :/
        if data['ret'] and isinstance(data['ret'], six.string_types):
            if 'ret_config' in data:
                ret['ret_config'] = data['ret_config']
            if 'ret_kwargs' in data:
                ret['ret_kwargs'] = data['ret_kwargs']
            ret['id'] = opts['id']
            for returner in set(data['ret'].split(',')):
                try:
                    returner_str = '{0}.returner'.format(returner)
                    if returner_str in minion_instance.returners:
                        minion_instance.returners[returner_str](ret)
                    else:
                        returner_err = minion_instance.returners.missing_fun_string(returner_str)
                        log.error(
                            'Returner %s could not be loaded: %s',
                            returner_str, returner_err
                        )
                except Exception as exc:
                    log.exception(
                        'The return failed for job %s: %s', data['jid'], exc
                    )

    @classmethod
    def _thread_multi_return(cls, minion_instance, opts, data):
        '''
        This method should be used as a threading target, start the actual
        minion side execution.
        '''
        fn_ = os.path.join(minion_instance.proc_dir, data['jid'])

        if opts['multiprocessing'] and not salt.utils.platform.is_windows():
            # Shutdown the multiprocessing before daemonizing
            salt.log.setup.shutdown_multiprocessing_logging()

            salt.utils.process.daemonize_if(opts)

            # Reconfigure multiprocessing logging after daemonizing
            salt.log.setup.setup_multiprocessing_logging()

        salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))

        sdata = {'pid': os.getpid()}
        sdata.update(data)
        log.info('Starting a new job with PID %s', sdata['pid'])
        with salt.utils.files.fopen(fn_, 'w+b') as fp_:
            fp_.write(minion_instance.serial.dumps(sdata))

        multifunc_ordered = opts.get('multifunc_ordered', False)
        num_funcs = len(data['fun'])
        if multifunc_ordered:
            ret = {
                'return': [None] * num_funcs,
                'retcode': [None] * num_funcs,
                'success': [False] * num_funcs
            }
        else:
            ret = {
                'return': {},
                'retcode': {},
                'success': {}
            }

        for ind in range(0, num_funcs):
            if not multifunc_ordered:
                ret['success'][data['fun'][ind]] = False
            try:
                minion_blackout_violation = False
                if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
                    whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
                    # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                    if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
                        minion_blackout_violation = True
                elif minion_instance.opts['grains'].get('minion_blackout', False):
                    whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
                    if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
                        minion_blackout_violation = True
                if minion_blackout_violation:
                    raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                              'to False in pillar or grains to resume operations. Only '
                                              'saltutil.refresh_pillar allowed in blackout mode.')

                func = minion_instance.functions[data['fun'][ind]]

                args, kwargs = load_args_and_kwargs(
                    func,
                    data['arg'][ind],
                    data)
                minion_instance.functions.pack['__context__']['retcode'] = 0
                key = ind if multifunc_ordered else data['fun'][ind]
                ret['return'][key] = func(*args, **kwargs)
                retcode = minion_instance.functions.pack['__context__'].get(
                    'retcode',
                    0
                )
                if retcode == 0:
                    # No nonzero retcode in __context__ dunder. Check if return
                    # is a dictionary with a "result" or "success" key.
                    try:
                        func_result = all(ret['return'][key].get(x, True)
                                          for x in ('result', 'success'))
                    except Exception:
                        # return data is not a dict
                        func_result = True
                    if not func_result:
                        retcode = 1

                ret['retcode'][key] = retcode
                ret['success'][key] = retcode == 0
            except Exception as exc:
                trb = traceback.format_exc()
                log.warning('The minion function caused an exception: %s', exc)
                if multifunc_ordered:
                    ret['return'][ind] = trb
                else:
                    ret['return'][data['fun'][ind]] = trb
        ret['jid'] = data['jid']
        ret['fun'] = data['fun']
        ret['fun_args'] = data['arg']
        if 'metadata' in data:
            ret['metadata'] = data['metadata']
        if minion_instance.connected:
            minion_instance._return_pub(
                ret,
                timeout=minion_instance._return_retry_timer()
            )
        if data['ret']:
            if 'ret_config' in data:
                ret['ret_config'] = data['ret_config']
            if 'ret_kwargs' in data:
                ret['ret_kwargs'] = data['ret_kwargs']
            for returner in set(data['ret'].split(',')):
                ret['id'] = opts['id']
                try:
                    minion_instance.returners['{0}.returner'.format(
                        returner
                    )](ret)
                except Exception as exc:
                    log.error(
                        'The return failed for job %s: %s',
                        data['jid'],
                        exc
                    )

    def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True):
        '''
        Return the data from the executed command to the master server
        '''
        jid = ret.get('jid', ret.get('__jid__'))
        fun = ret.get('fun', ret.get('__fun__'))
        if self.opts['multiprocessing']:
            fn_ = os.path.join(self.proc_dir, jid)
            if os.path.isfile(fn_):
                try:
                    os.remove(fn_)
                except (OSError, IOError):
                    # The file is gone already
                    pass
        log.info('Returning information for job: %s', jid)
        log.trace('Return data: %s', ret)
        if ret_cmd == '_syndic_return':
            # NOTE(review): the syndic branch reads self.opts['uid'] while the
            # sibling _return_pub_multi uses self.opts['id'] — confirm which
            # key is intended here.
            load = {'cmd': ret_cmd,
                    'id': self.opts['uid'],
                    'jid': jid,
                    'fun': fun,
                    'arg': ret.get('arg'),
                    'tgt': ret.get('tgt'),
                    'tgt_type': ret.get('tgt_type'),
                    'load': ret.get('__load__')}
            if '__master_id__' in ret:
                load['master_id'] = ret['__master_id__']
            load['return'] = {}
            for key, value in six.iteritems(ret):
                if key.startswith('__'):
                    continue
                load['return'][key] = value
        else:
            load = {'cmd': ret_cmd,
                    'id': self.opts['id']}
            for key, value in six.iteritems(ret):
                load[key] = value

        if 'out' in ret:
            if isinstance(ret['out'], six.string_types):
                load['out'] = ret['out']
            else:
                log.error(
                    'Invalid outputter %s. This is likely a bug.',
                    ret['out']
                )
        else:
            try:
                oput = self.functions[fun].__outputter__
            except (KeyError, AttributeError, TypeError):
                pass
            else:
                if isinstance(oput, six.string_types):
                    load['out'] = oput
        if self.opts['cache_jobs']:
            # Local job cache has been enabled
            if ret['jid'] == 'req':
                ret['jid'] = salt.utils.jid.gen_jid(self.opts)
            salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret)

        if not self.opts['pub_ret']:
            return ''

        def timeout_handler(*_):
            log.warning(
                'The minion failed to return the job information for job %s. '
                'This is often due to the master being shut down or '
                'overloaded. If the master is running, consider increasing '
                'the worker_threads value.', jid
            )
            return True

        if sync:
            try:
                ret_val = self._send_req_sync(load, timeout=timeout)
            except SaltReqTimeoutError:
                timeout_handler()
                return ''
        else:
            with tornado.stack_context.ExceptionStackContext(timeout_handler):
                ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg

        log.trace('ret_val = %s', ret_val)  # pylint: disable=no-member
        return ret_val

    def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True):
        '''
        Return the data from the executed command to the master server
        '''
        if not isinstance(rets, list):
            rets = [rets]
        jids = {}
        for ret in rets:
            jid = ret.get('jid', ret.get('__jid__'))
            fun = ret.get('fun', ret.get('__fun__'))
            if self.opts['multiprocessing']:
                fn_ = os.path.join(self.proc_dir, jid)
                if os.path.isfile(fn_):
                    try:
                        os.remove(fn_)
                    except (OSError, IOError):
                        # The file is gone already
                        pass
            log.info('Returning information for job: %s', jid)
            load = jids.setdefault(jid, {})
            if ret_cmd == '_syndic_return':
                if not load:
                    load.update({'id': self.opts['id'],
                                 'jid': jid,
                                 'fun': fun,
                                 'arg': ret.get('arg'),
                                 'tgt': ret.get('tgt'),
                                 'tgt_type': ret.get('tgt_type'),
                                 'load': ret.get('__load__'),
                                 'return': {}})
                if '__master_id__' in ret:
                    load['master_id'] = ret['__master_id__']
                for key, value in six.iteritems(ret):
                    if key.startswith('__'):
                        continue
                    load['return'][key] = value
            else:
                load.update({'id': self.opts['id']})
                for key, value in six.iteritems(ret):
                    load[key] = value
            if 'out' in ret:
                if isinstance(ret['out'], six.string_types):
                    load['out'] = ret['out']
                else:
                    log.error(
                        'Invalid outputter %s. This is likely a bug.',
                        ret['out']
                    )
            else:
                try:
                    oput = self.functions[fun].__outputter__
                except (KeyError, AttributeError, TypeError):
                    pass
                else:
                    if isinstance(oput, six.string_types):
                        load['out'] = oput
            if self.opts['cache_jobs']:
                # Local job cache has been enabled
                salt.utils.minion.cache_jobs(self.opts, load['jid'], ret)

        load = {'cmd': ret_cmd,
                'load': list(six.itervalues(jids))}

        def timeout_handler(*_):
            log.warning(
                'The minion failed to return the job information for job %s. '
                'This is often due to the master being shut down or '
                'overloaded. If the master is running, consider increasing '
                'the worker_threads value.', jid
            )
            return True

        if sync:
            try:
                ret_val = self._send_req_sync(load, timeout=timeout)
            except SaltReqTimeoutError:
                timeout_handler()
                return ''
        else:
            with tornado.stack_context.ExceptionStackContext(timeout_handler):
                ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None)  # pylint: disable=unexpected-keyword-arg

        log.trace('ret_val = %s', ret_val)  # pylint: disable=no-member
        return ret_val

    def _state_run(self):
        '''
        Execute a state run based on information set in the minion config file
        '''
        if self.opts['startup_states']:
            if self.opts.get('master_type', 'str') == 'disable' and \
                    self.opts.get('file_client', 'remote') == 'remote':
                log.warning(
                    'Cannot run startup_states when \'master_type\' is set '
                    'to \'disable\' and \'file_client\' is set to '
                    '\'remote\'. Skipping.'
                )
            else:
                data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
                if self.opts['startup_states'] == 'sls':
                    data['fun'] = 'state.sls'
                    data['arg'] = [self.opts['sls_list']]
                elif self.opts['startup_states'] == 'top':
                    data['fun'] = 'state.top'
                    data['arg'] = [self.opts['top_file']]
                else:
                    data['fun'] = 'state.highstate'
                    data['arg'] = []
                # NOTE(review): _handle_decoded_payload is a coroutine; it is
                # invoked here without yield/IOLoop scheduling — confirm the
                # returned future is intentionally discarded.
                self._handle_decoded_payload(data)

    def _refresh_grains_watcher(self, refresh_interval_in_minutes):
        '''
        Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion
        :param refresh_interval_in_minutes:
        :return: None
        '''
        if '__update_grains' not in self.opts.get('schedule', {}):
            if 'schedule' not in self.opts:
                self.opts['schedule'] = {}
            self.opts['schedule'].update({
                '__update_grains':
                    {
                        'function': 'event.fire',
                        'args': [{}, 'grains_refresh'],
                        'minutes': refresh_interval_in_minutes
                    }
            })

    def _fire_master_minion_start(self):
        # Send an event to the master that the minion is live
        if self.opts['enable_legacy_startup_events']:
            # Old style event. Defaults to False in Sodium release.
            self._fire_master(
                'Minion {0} started at {1}'.format(
                    self.opts['id'],
                    time.asctime()
                ),
                'minion_start'
            )
        # send name spaced event
        self._fire_master(
            'Minion {0} started at {1}'.format(
                self.opts['id'],
                time.asctime()
            ),
            tagify([self.opts['id'], 'start'], 'minion'),
        )

    def module_refresh(self, force_refresh=False, notify=False):
        '''
        Refresh the functions and returners.
        '''
        log.debug('Refreshing modules. Notify=%s', notify)
        self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify)
        self.schedule.functions = self.functions
        self.schedule.returners = self.returners

    def beacons_refresh(self):
        '''
        Refresh the functions and returners.
        '''
        log.debug('Refreshing beacons.')
        self.beacons = salt.beacons.Beacon(self.opts, self.functions)

    def matchers_refresh(self):
        '''
        Refresh the matchers
        '''
        log.debug('Refreshing matchers.')
        self.matchers = salt.loader.matchers(self.opts)

    # TODO: only allow one future in flight at a time?
    @tornado.gen.coroutine
    def pillar_refresh(self, force_refresh=False, notify=False):
        '''
        Refresh the pillar
        '''
        if self.connected:
            log.debug('Refreshing pillar. Notify: %s', notify)
            async_pillar = salt.pillar.get_async_pillar(
                self.opts,
                self.opts['grains'],
                self.opts['id'],
                self.opts['saltenv'],
                pillarenv=self.opts.get('pillarenv'),
            )
            try:
                self.opts['pillar'] = yield async_pillar.compile_pillar()
                if notify:
                    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
                    evt.fire_event({'complete': True},
                                   tag=salt.defaults.events.MINION_PILLAR_COMPLETE)
            except SaltClientError:
                # Do not exit if a pillar refresh fails.
                log.error('Pillar data could not be refreshed. '
                          'One or more masters may be down!')
            finally:
                async_pillar.destroy()
        self.module_refresh(force_refresh, notify)
        self.matchers_refresh()
        self.beacons_refresh()

    def manage_schedule(self, tag, data):
        '''
        Refresh the functions and returners.
''' func = data.get('func', None) name = data.get('name', None) schedule = data.get('schedule', None) where = data.get('where', None) persist = data.get('persist', None) funcs = {'delete': ('delete_job', (name, persist)), 'add': ('add_job', (schedule, persist)), 'modify': ('modify_job', (name, schedule, persist)), 'enable': ('enable_schedule', ()), 'disable': ('disable_schedule', ()), 'enable_job': ('enable_job', (name, persist)), 'disable_job': ('disable_job', (name, persist)), 'postpone_job': ('postpone_job', (name, data)), 'skip_job': ('skip_job', (name, data)), 'reload': ('reload', (schedule,)), 'list': ('list', (where,)), 'save_schedule': ('save_schedule', ()), 'get_next_fire_time': ('get_next_fire_time', (name,))} # Call the appropriate schedule function try: alias, params = funcs.get(func) getattr(self.schedule, alias)(*params) except TypeError: log.error('Function "%s" is unavailable in salt.utils.scheduler', func) def manage_beacons(self, tag, data): ''' Manage Beacons ''' func = data.get('func', None) name = data.get('name', None) beacon_data = data.get('beacon_data', None) include_pillar = data.get('include_pillar', None) include_opts = data.get('include_opts', None) funcs = {'add': ('add_beacon', (name, beacon_data)), 'modify': ('modify_beacon', (name, beacon_data)), 'delete': ('delete_beacon', (name,)), 'enable': ('enable_beacons', ()), 'disable': ('disable_beacons', ()), 'enable_beacon': ('enable_beacon', (name,)), 'disable_beacon': ('disable_beacon', (name,)), 'list': ('list_beacons', (include_opts, include_pillar)), 'list_available': ('list_available_beacons', ()), 'validate_beacon': ('validate_beacon', (name, beacon_data)), 'reset': ('reset', ())} # Call the appropriate beacon function try: alias, params = funcs.get(func) getattr(self.beacons, alias)(*params) except AttributeError: log.error('Function "%s" is unavailable in salt.beacons', func) except TypeError as exc: log.info( 'Failed to handle %s with data(%s). 
Error: %s', tag, data, exc, exc_info_on_loglevel=logging.DEBUG ) def environ_setenv(self, tag, data): ''' Set the salt-minion main process environment according to the data contained in the minion event data ''' environ = data.get('environ', None) if environ is None: return False false_unsets = data.get('false_unsets', False) clear_all = data.get('clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all) def _pre_tune(self): ''' Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running ''' if self._running is None: self._running = True elif self._running is False: log.error( 'This %s was scheduled to stop. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return elif self._running is True: log.error( 'This %s is already running. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return try: log.info( '%s is starting as user \'%s\'', self.__class__.__name__, salt.utils.user.get_user() ) except Exception as err: # Only windows is allowed to fail here. See #3189. Log as debug in # that case. Else, error. 
log.log( salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR, 'Failed to get the user who is starting %s', self.__class__.__name__, exc_info=err ) def _mine_send(self, tag, data): ''' Send mine data to the master ''' channel = salt.transport.client.ReqChannel.factory(self.opts) data['tok'] = self.tok try: ret = channel.send(data) return ret except SaltReqTimeoutError: log.warning('Unable to send mine data to master.') return None finally: channel.close() def _handle_tag_module_refresh(self, tag, data): ''' Handle a module_refresh event ''' self.module_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) @tornado.gen.coroutine def _handle_tag_pillar_refresh(self, tag, data): ''' Handle a pillar_refresh event ''' yield self.pillar_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) def _handle_tag_beacons_refresh(self, tag, data): ''' Handle a beacon_refresh event ''' self.beacons_refresh() def _handle_tag_matchers_refresh(self, tag, data): ''' Handle a matchers_refresh event ''' self.matchers_refresh() def _handle_tag_manage_schedule(self, tag, data): ''' Handle a manage_schedule event ''' self.manage_schedule(tag, data) def _handle_tag_manage_beacons(self, tag, data): ''' Handle a manage_beacons event ''' self.manage_beacons(tag, data) def _handle_tag_grains_refresh(self, tag, data): ''' Handle a grains_refresh event ''' if (data.get('force_refresh', False) or self.grains_cache != self.opts['grains']): self.pillar_refresh(force_refresh=True) self.grains_cache = self.opts['grains'] def _handle_tag_environ_setenv(self, tag, data): ''' Handle a environ_setenv event ''' self.environ_setenv(tag, data) def _handle_tag_minion_mine(self, tag, data): ''' Handle a _minion_mine event ''' self._mine_send(tag, data) def _handle_tag_fire_master(self, tag, data): ''' Handle a fire_master event ''' if self.connected: log.debug('Forwarding master event tag=%s', data['tag']) 
self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) def _handle_tag_master_disconnected_failback(self, tag, data): ''' Handle a master_disconnected_failback event ''' # if the master disconnect event is for a different master, raise an exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 
'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = 
True self.io_loop.stop() def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) def _handle_tag_salt_auth_creds(self, tag, data): ''' Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def handle_event(self, package): 
''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. ''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. 
Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion.setup_beacons
python
def setup_beacons(self, before_connect=False):
    '''
    Set up the beacons.
    This is safe to call multiple times.
    '''
    self._setup_core()

    interval_ms = self.opts['loop_interval'] * 1000
    pending = {}

    if 'beacons' not in self.periodic_callbacks:
        self.beacons = salt.beacons.Beacon(self.opts, self.functions)

        def fire_beacons():
            # Evaluate the configured beacons; an error here is logged
            # rather than propagated so the loop keeps running
            collected = None
            try:
                collected = self.process_beacons(self.functions)
            except Exception:
                log.critical('The beacon errored: ', exc_info=True)
            if collected and self.connected:
                self._fire_master(events=collected)

        pending['beacons'] = tornado.ioloop.PeriodicCallback(
            fire_beacons, interval_ms)
        if before_connect:
            # Run one beacon pass up front so there is a chance for an
            # iteration to occur before connecting
            fire_beacons()

    if 'cleanup' not in self.periodic_callbacks:
        pending['cleanup'] = tornado.ioloop.PeriodicCallback(
            self._fallback_cleanups, interval_ms)

    # Start every callback registered above, then record it so a later
    # call can see that it is already installed
    for callback in six.itervalues(pending):
        callback.start()

    self.periodic_callbacks.update(pending)
Set up the beacons. This is safe to call multiple times.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2718-L2755
[ "def itervalues(d, **kw):\n return d.itervalues(**kw)\n", " def _setup_core(self):\n '''\n Set up the core minion attributes.\n This is safe to call multiple times.\n '''\n if not self.ready:\n # First call. Initialize.\n self.functions, self.returners, self.function_errors, self.executors = self._load_modules()\n self.serial = salt.payload.Serial(self.opts)\n self.mod_opts = self._prep_mod_opts()\n# self.matcher = Matcher(self.opts, self.functions)\n self.matchers = salt.loader.matchers(self.opts)\n self.beacons = salt.beacons.Beacon(self.opts, self.functions)\n uid = salt.utils.user.get_uid(user=self.opts.get('user', None))\n self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)\n self.grains_cache = self.opts['grains']\n self.ready = True\n", "def handle_beacons():\n # Process Beacons\n beacons = None\n try:\n beacons = self.process_beacons(self.functions)\n except Exception:\n log.critical('The beacon errored: ', exc_info=True)\n if beacons and self.connected:\n self._fire_master(events=beacons)\n" ]
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, 
self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' 
mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! 
if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) executors = salt.loader.executors(opts, functions, proxy=proxy) if opt_in: self.opts = opts return functions, returners, errors, executors def _send_req_sync(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.ReqChannel.factory(self.opts) try: return channel.send(load, timeout=timeout) finally: channel.close() @tornado.gen.coroutine def _send_req_async(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.AsyncReqChannel.factory(self.opts) try: ret = yield channel.send(load, timeout=timeout) raise tornado.gen.Return(ret) finally: channel.close() def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. ''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return if sync: try: self._send_req_sync(load, timeout) except salt.exceptions.SaltReqTimeoutError: log.info('fire_master failed: master could not be contacted. 
Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return False except Exception: log.info('fire_master failed: %s', traceback.format_exc()) return False else: if timeout_handler is None: def handle_timeout(*_): log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return True timeout_handler = handle_timeout with tornado.stack_context.ExceptionStackContext(timeout_handler): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # Ensure payload is unicode. Disregard failure to decode binary blobs. if six.PY2: data = salt.utils.data.decode(data, keep=True) if 'user' in data: log.info( 'User %s Executing command %s with jid %s', data['user'], data['fun'], data['jid'] ) else: log.info( 'Executing command %s with jid %s', data['fun'], data['jid'] ) log.debug('Command details %s', data) # Don't duplicate jobs log.trace('Started JIDs: %s', self.jid_queue) if self.jid_queue is not None: if data['jid'] in self.jid_queue: return else: self.jid_queue.append(data['jid']) if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']: self.jid_queue.pop(0) if isinstance(data['fun'], six.string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners process_count_max = self.opts.get('process_count_max') process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs') if process_count_max > 0: process_count = len(salt.utils.minion.running(self.opts)) while 
process_count >= process_count_max: log.warning('Maximum number of processes (%s) reached while ' 'executing jid %s, waiting %s seconds...', process_count_max, data['jid'], process_count_max_sleep_secs) yield tornado.gen.sleep(process_count_max_sleep_secs) process_count = len(salt.utils.minion.running(self.opts)) # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. instance = self multiprocessing_enabled = self.opts.get('multiprocessing', True) if multiprocessing_enabled: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None with default_signals(signal.SIGINT, signal.SIGTERM): process = SignalHandlingMultiprocessingProcess( target=self._target, args=(instance, self.opts, data, self.connected) ) else: process = threading.Thread( target=self._target, args=(instance, self.opts, data, self.connected), name=data['jid'] ) if multiprocessing_enabled: with default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers process.start() else: process.start() # TODO: remove the windows specific check? 
if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading 
target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. 
Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
try: func_result = all(return_data.get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret['retcode'] = retcode ret['success'] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = 'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( 'A command in \'%s\' had a problem: %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( 'Problem executing \'%s\': %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing \'{0}\': {1}'.format( function_name, exc ) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format( function_name, exc, func.__doc__ or '' ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string(function_name) ret['return'] = docs else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = 
function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: \'{0}\''.format( minion_instance.function_errors[mod_name] ) ret['success'] = False ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. Ignoring.') if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) # Add default returners from minion config # Should have been coverted to comma-delimited string already if isinstance(opts.get('return'), six.string_types): if data['ret']: data['ret'] = ','.join((data['ret'], opts['return'])) else: data['ret'] = opts['return'] log.debug('minion return: %s', ret) # TODO: make a list? Seems odd to split it this late :/ if data['ret'] and isinstance(data['ret'], six.string_types): if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: returner_str = '{0}.returner'.format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string(returner_str) log.error( 'Returner %s could not be loaded: %s', returner_str, returner_err ) except Exception as exc: log.exception( 'The return failed for job %s: %s', data['jid'], exc ) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. 
''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID %s', sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) multifunc_ordered = opts.get('multifunc_ordered', False) num_funcs = len(data['fun']) if multifunc_ordered: ret = { 'return': [None] * num_funcs, 'retcode': [None] * num_funcs, 'success': [False] * num_funcs } else: ret = { 'return': {}, 'retcode': {}, 'success': {} } for ind in range(0, num_funcs): if not multifunc_ordered: ret['success'][data['fun'][ind]] = False try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True elif minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. 
Only ' 'saltutil.refresh_pillar allowed in blackout mode.') func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) minion_instance.functions.pack['__context__']['retcode'] = 0 key = ind if multifunc_ordered else data['fun'][ind] ret['return'][key] = func(*args, **kwargs) retcode = minion_instance.functions.pack['__context__'].get( 'retcode', 0 ) if retcode == 0: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. try: func_result = all(ret['return'][key].get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = 1 ret['retcode'][key] = retcode ret['success'][key] = retcode == 0 except Exception as exc: trb = traceback.format_exc() log.warning('The minion function caused an exception: %s', exc) if multifunc_ordered: ret['return'][ind] = trb else: ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job %s: %s', data['jid'], exc ) def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file 
is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled if ret['jid'] == 'req': ret['jid'] = salt.utils.jid.gen_jid(self.opts) salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) if not self.opts['pub_ret']: return '' def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. 
If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' if not isinstance(rets, list): rets = [rets] jids = {} for ret in rets: jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) load = jids.setdefault(jid, {}) if ret_cmd == '_syndic_return': if not load: load.update({'id': self.opts['id'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__'), 'return': {}}) if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load.update({'id': self.opts['id']}) for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. 
This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled salt.utils.minion.cache_jobs(self.opts, load['jid'], ret) load = {'cmd': ret_cmd, 'load': list(six.itervalues(jids))} def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' 
) else: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data) def _refresh_grains_watcher(self, refresh_interval_in_minutes): ''' Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: :return: None ''' if '__update_grains' not in self.opts.get('schedule', {}): if 'schedule' not in self.opts: self.opts['schedule'] = {} self.opts['schedule'].update({ '__update_grains': { 'function': 'event.fire', 'args': [{}, 'grains_refresh'], 'minutes': refresh_interval_in_minutes } }) def _fire_master_minion_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to False in Sodium release. self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'minion_start' ) # send name spaced event self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'minion'), ) def module_refresh(self, force_refresh=False, notify=False): ''' Refresh the functions and returners. ''' log.debug('Refreshing modules. Notify=%s', notify) self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify) self.schedule.functions = self.functions self.schedule.returners = self.returners def beacons_refresh(self): ''' Refresh the functions and returners. 
''' log.debug('Refreshing beacons.') self.beacons = salt.beacons.Beacon(self.opts, self.functions) def matchers_refresh(self): ''' Refresh the matchers ''' log.debug('Refreshing matchers.') self.matchers = salt.loader.matchers(self.opts) # TODO: only allow one future in flight at a time? @tornado.gen.coroutine def pillar_refresh(self, force_refresh=False, notify=False): ''' Refresh the pillar ''' if self.connected: log.debug('Refreshing pillar. Notify: %s', notify) async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ) try: self.opts['pillar'] = yield async_pillar.compile_pillar() if notify: evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE) except SaltClientError: # Do not exit if a pillar refresh fails. log.error('Pillar data could not be refreshed. ' 'One or more masters may be down!') finally: async_pillar.destroy() self.module_refresh(force_refresh, notify) self.matchers_refresh() self.beacons_refresh() def manage_schedule(self, tag, data): ''' Refresh the functions and returners. 
''' func = data.get('func', None) name = data.get('name', None) schedule = data.get('schedule', None) where = data.get('where', None) persist = data.get('persist', None) funcs = {'delete': ('delete_job', (name, persist)), 'add': ('add_job', (schedule, persist)), 'modify': ('modify_job', (name, schedule, persist)), 'enable': ('enable_schedule', ()), 'disable': ('disable_schedule', ()), 'enable_job': ('enable_job', (name, persist)), 'disable_job': ('disable_job', (name, persist)), 'postpone_job': ('postpone_job', (name, data)), 'skip_job': ('skip_job', (name, data)), 'reload': ('reload', (schedule,)), 'list': ('list', (where,)), 'save_schedule': ('save_schedule', ()), 'get_next_fire_time': ('get_next_fire_time', (name,))} # Call the appropriate schedule function try: alias, params = funcs.get(func) getattr(self.schedule, alias)(*params) except TypeError: log.error('Function "%s" is unavailable in salt.utils.scheduler', func) def manage_beacons(self, tag, data): ''' Manage Beacons ''' func = data.get('func', None) name = data.get('name', None) beacon_data = data.get('beacon_data', None) include_pillar = data.get('include_pillar', None) include_opts = data.get('include_opts', None) funcs = {'add': ('add_beacon', (name, beacon_data)), 'modify': ('modify_beacon', (name, beacon_data)), 'delete': ('delete_beacon', (name,)), 'enable': ('enable_beacons', ()), 'disable': ('disable_beacons', ()), 'enable_beacon': ('enable_beacon', (name,)), 'disable_beacon': ('disable_beacon', (name,)), 'list': ('list_beacons', (include_opts, include_pillar)), 'list_available': ('list_available_beacons', ()), 'validate_beacon': ('validate_beacon', (name, beacon_data)), 'reset': ('reset', ())} # Call the appropriate beacon function try: alias, params = funcs.get(func) getattr(self.beacons, alias)(*params) except AttributeError: log.error('Function "%s" is unavailable in salt.beacons', func) except TypeError as exc: log.info( 'Failed to handle %s with data(%s). 
Error: %s', tag, data, exc, exc_info_on_loglevel=logging.DEBUG ) def environ_setenv(self, tag, data): ''' Set the salt-minion main process environment according to the data contained in the minion event data ''' environ = data.get('environ', None) if environ is None: return False false_unsets = data.get('false_unsets', False) clear_all = data.get('clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all) def _pre_tune(self): ''' Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running ''' if self._running is None: self._running = True elif self._running is False: log.error( 'This %s was scheduled to stop. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return elif self._running is True: log.error( 'This %s is already running. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return try: log.info( '%s is starting as user \'%s\'', self.__class__.__name__, salt.utils.user.get_user() ) except Exception as err: # Only windows is allowed to fail here. See #3189. Log as debug in # that case. Else, error. 
log.log( salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR, 'Failed to get the user who is starting %s', self.__class__.__name__, exc_info=err ) def _mine_send(self, tag, data): ''' Send mine data to the master ''' channel = salt.transport.client.ReqChannel.factory(self.opts) data['tok'] = self.tok try: ret = channel.send(data) return ret except SaltReqTimeoutError: log.warning('Unable to send mine data to master.') return None finally: channel.close() def _handle_tag_module_refresh(self, tag, data): ''' Handle a module_refresh event ''' self.module_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) @tornado.gen.coroutine def _handle_tag_pillar_refresh(self, tag, data): ''' Handle a pillar_refresh event ''' yield self.pillar_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) def _handle_tag_beacons_refresh(self, tag, data): ''' Handle a beacon_refresh event ''' self.beacons_refresh() def _handle_tag_matchers_refresh(self, tag, data): ''' Handle a matchers_refresh event ''' self.matchers_refresh() def _handle_tag_manage_schedule(self, tag, data): ''' Handle a manage_schedule event ''' self.manage_schedule(tag, data) def _handle_tag_manage_beacons(self, tag, data): ''' Handle a manage_beacons event ''' self.manage_beacons(tag, data) def _handle_tag_grains_refresh(self, tag, data): ''' Handle a grains_refresh event ''' if (data.get('force_refresh', False) or self.grains_cache != self.opts['grains']): self.pillar_refresh(force_refresh=True) self.grains_cache = self.opts['grains'] def _handle_tag_environ_setenv(self, tag, data): ''' Handle a environ_setenv event ''' self.environ_setenv(tag, data) def _handle_tag_minion_mine(self, tag, data): ''' Handle a _minion_mine event ''' self._mine_send(tag, data) def _handle_tag_fire_master(self, tag, data): ''' Handle a fire_master event ''' if self.connected: log.debug('Forwarding master event tag=%s', data['tag']) 
self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) def _handle_tag_master_disconnected_failback(self, tag, data): ''' Handle a master_disconnected_failback event ''' # if the master disconnect event is for a different master, raise an exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 
'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = 
True self.io_loop.stop() def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) def _handle_tag_salt_auth_creds(self, tag, data): ''' Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def handle_event(self, package): 
''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. ''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. 
Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion.setup_scheduler
python
def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks)
Set up the scheduler. This is safe to call multiple times.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2757-L2808
[ "def itervalues(d, **kw):\n return d.itervalues(**kw)\n", "def master_event(type, master=None):\n '''\n Centralized master event function which will return event type based on event_map\n '''\n event_map = {'connected': '__master_connected',\n 'disconnected': '__master_disconnected',\n 'failback': '__master_failback',\n 'alive': '__master_alive'}\n\n if type == 'alive' and master is not None:\n return '{0}_{1}'.format(event_map.get(type), master)\n\n return event_map.get(type, None)\n", "def _refresh_grains_watcher(self, refresh_interval_in_minutes):\n '''\n Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion\n :param refresh_interval_in_minutes:\n :return: None\n '''\n if '__update_grains' not in self.opts.get('schedule', {}):\n if 'schedule' not in self.opts:\n self.opts['schedule'] = {}\n self.opts['schedule'].update({\n '__update_grains':\n {\n 'function': 'event.fire',\n 'args': [{}, 'grains_refresh'],\n 'minutes': refresh_interval_in_minutes\n }\n })\n", " def _setup_core(self):\n '''\n Set up the core minion attributes.\n This is safe to call multiple times.\n '''\n if not self.ready:\n # First call. Initialize.\n self.functions, self.returners, self.function_errors, self.executors = self._load_modules()\n self.serial = salt.payload.Serial(self.opts)\n self.mod_opts = self._prep_mod_opts()\n# self.matcher = Matcher(self.opts, self.functions)\n self.matchers = salt.loader.matchers(self.opts)\n self.beacons = salt.beacons.Beacon(self.opts, self.functions)\n uid = salt.utils.user.get_uid(user=self.opts.get('user', None))\n self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)\n self.grains_cache = self.opts['grains']\n self.ready = True\n", "def handle_schedule():\n self.process_schedule(self, loop_interval)\n" ]
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, 
self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' 
mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! 
if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) executors = salt.loader.executors(opts, functions, proxy=proxy) if opt_in: self.opts = opts return functions, returners, errors, executors def _send_req_sync(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.ReqChannel.factory(self.opts) try: return channel.send(load, timeout=timeout) finally: channel.close() @tornado.gen.coroutine def _send_req_async(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.AsyncReqChannel.factory(self.opts) try: ret = yield channel.send(load, timeout=timeout) raise tornado.gen.Return(ret) finally: channel.close() def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. ''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return if sync: try: self._send_req_sync(load, timeout) except salt.exceptions.SaltReqTimeoutError: log.info('fire_master failed: master could not be contacted. 
Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return False except Exception: log.info('fire_master failed: %s', traceback.format_exc()) return False else: if timeout_handler is None: def handle_timeout(*_): log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return True timeout_handler = handle_timeout with tornado.stack_context.ExceptionStackContext(timeout_handler): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # Ensure payload is unicode. Disregard failure to decode binary blobs. if six.PY2: data = salt.utils.data.decode(data, keep=True) if 'user' in data: log.info( 'User %s Executing command %s with jid %s', data['user'], data['fun'], data['jid'] ) else: log.info( 'Executing command %s with jid %s', data['fun'], data['jid'] ) log.debug('Command details %s', data) # Don't duplicate jobs log.trace('Started JIDs: %s', self.jid_queue) if self.jid_queue is not None: if data['jid'] in self.jid_queue: return else: self.jid_queue.append(data['jid']) if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']: self.jid_queue.pop(0) if isinstance(data['fun'], six.string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners process_count_max = self.opts.get('process_count_max') process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs') if process_count_max > 0: process_count = len(salt.utils.minion.running(self.opts)) while 
process_count >= process_count_max: log.warning('Maximum number of processes (%s) reached while ' 'executing jid %s, waiting %s seconds...', process_count_max, data['jid'], process_count_max_sleep_secs) yield tornado.gen.sleep(process_count_max_sleep_secs) process_count = len(salt.utils.minion.running(self.opts)) # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. instance = self multiprocessing_enabled = self.opts.get('multiprocessing', True) if multiprocessing_enabled: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None with default_signals(signal.SIGINT, signal.SIGTERM): process = SignalHandlingMultiprocessingProcess( target=self._target, args=(instance, self.opts, data, self.connected) ) else: process = threading.Thread( target=self._target, args=(instance, self.opts, data, self.connected), name=data['jid'] ) if multiprocessing_enabled: with default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers process.start() else: process.start() # TODO: remove the windows specific check? 
if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading 
target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. 
Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
try: func_result = all(return_data.get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret['retcode'] = retcode ret['success'] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = 'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( 'A command in \'%s\' had a problem: %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( 'Problem executing \'%s\': %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing \'{0}\': {1}'.format( function_name, exc ) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format( function_name, exc, func.__doc__ or '' ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string(function_name) ret['return'] = docs else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = 
function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: \'{0}\''.format( minion_instance.function_errors[mod_name] ) ret['success'] = False ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. Ignoring.') if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) # Add default returners from minion config # Should have been coverted to comma-delimited string already if isinstance(opts.get('return'), six.string_types): if data['ret']: data['ret'] = ','.join((data['ret'], opts['return'])) else: data['ret'] = opts['return'] log.debug('minion return: %s', ret) # TODO: make a list? Seems odd to split it this late :/ if data['ret'] and isinstance(data['ret'], six.string_types): if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: returner_str = '{0}.returner'.format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string(returner_str) log.error( 'Returner %s could not be loaded: %s', returner_str, returner_err ) except Exception as exc: log.exception( 'The return failed for job %s: %s', data['jid'], exc ) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. 
''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID %s', sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) multifunc_ordered = opts.get('multifunc_ordered', False) num_funcs = len(data['fun']) if multifunc_ordered: ret = { 'return': [None] * num_funcs, 'retcode': [None] * num_funcs, 'success': [False] * num_funcs } else: ret = { 'return': {}, 'retcode': {}, 'success': {} } for ind in range(0, num_funcs): if not multifunc_ordered: ret['success'][data['fun'][ind]] = False try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True elif minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. 
Only ' 'saltutil.refresh_pillar allowed in blackout mode.') func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) minion_instance.functions.pack['__context__']['retcode'] = 0 key = ind if multifunc_ordered else data['fun'][ind] ret['return'][key] = func(*args, **kwargs) retcode = minion_instance.functions.pack['__context__'].get( 'retcode', 0 ) if retcode == 0: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. try: func_result = all(ret['return'][key].get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = 1 ret['retcode'][key] = retcode ret['success'][key] = retcode == 0 except Exception as exc: trb = traceback.format_exc() log.warning('The minion function caused an exception: %s', exc) if multifunc_ordered: ret['return'][ind] = trb else: ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job %s: %s', data['jid'], exc ) def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file 
is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled if ret['jid'] == 'req': ret['jid'] = salt.utils.jid.gen_jid(self.opts) salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) if not self.opts['pub_ret']: return '' def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. 
If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' if not isinstance(rets, list): rets = [rets] jids = {} for ret in rets: jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) load = jids.setdefault(jid, {}) if ret_cmd == '_syndic_return': if not load: load.update({'id': self.opts['id'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__'), 'return': {}}) if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load.update({'id': self.opts['id']}) for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. 
This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled salt.utils.minion.cache_jobs(self.opts, load['jid'], ret) load = {'cmd': ret_cmd, 'load': list(six.itervalues(jids))} def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' 
) else: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data) def _refresh_grains_watcher(self, refresh_interval_in_minutes): ''' Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: :return: None ''' if '__update_grains' not in self.opts.get('schedule', {}): if 'schedule' not in self.opts: self.opts['schedule'] = {} self.opts['schedule'].update({ '__update_grains': { 'function': 'event.fire', 'args': [{}, 'grains_refresh'], 'minutes': refresh_interval_in_minutes } }) def _fire_master_minion_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to False in Sodium release. self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'minion_start' ) # send name spaced event self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'minion'), ) def module_refresh(self, force_refresh=False, notify=False): ''' Refresh the functions and returners. ''' log.debug('Refreshing modules. Notify=%s', notify) self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify) self.schedule.functions = self.functions self.schedule.returners = self.returners def beacons_refresh(self): ''' Refresh the functions and returners. 
''' log.debug('Refreshing beacons.') self.beacons = salt.beacons.Beacon(self.opts, self.functions) def matchers_refresh(self): ''' Refresh the matchers ''' log.debug('Refreshing matchers.') self.matchers = salt.loader.matchers(self.opts) # TODO: only allow one future in flight at a time? @tornado.gen.coroutine def pillar_refresh(self, force_refresh=False, notify=False): ''' Refresh the pillar ''' if self.connected: log.debug('Refreshing pillar. Notify: %s', notify) async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ) try: self.opts['pillar'] = yield async_pillar.compile_pillar() if notify: evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE) except SaltClientError: # Do not exit if a pillar refresh fails. log.error('Pillar data could not be refreshed. ' 'One or more masters may be down!') finally: async_pillar.destroy() self.module_refresh(force_refresh, notify) self.matchers_refresh() self.beacons_refresh() def manage_schedule(self, tag, data): ''' Refresh the functions and returners. 
''' func = data.get('func', None) name = data.get('name', None) schedule = data.get('schedule', None) where = data.get('where', None) persist = data.get('persist', None) funcs = {'delete': ('delete_job', (name, persist)), 'add': ('add_job', (schedule, persist)), 'modify': ('modify_job', (name, schedule, persist)), 'enable': ('enable_schedule', ()), 'disable': ('disable_schedule', ()), 'enable_job': ('enable_job', (name, persist)), 'disable_job': ('disable_job', (name, persist)), 'postpone_job': ('postpone_job', (name, data)), 'skip_job': ('skip_job', (name, data)), 'reload': ('reload', (schedule,)), 'list': ('list', (where,)), 'save_schedule': ('save_schedule', ()), 'get_next_fire_time': ('get_next_fire_time', (name,))} # Call the appropriate schedule function try: alias, params = funcs.get(func) getattr(self.schedule, alias)(*params) except TypeError: log.error('Function "%s" is unavailable in salt.utils.scheduler', func) def manage_beacons(self, tag, data): ''' Manage Beacons ''' func = data.get('func', None) name = data.get('name', None) beacon_data = data.get('beacon_data', None) include_pillar = data.get('include_pillar', None) include_opts = data.get('include_opts', None) funcs = {'add': ('add_beacon', (name, beacon_data)), 'modify': ('modify_beacon', (name, beacon_data)), 'delete': ('delete_beacon', (name,)), 'enable': ('enable_beacons', ()), 'disable': ('disable_beacons', ()), 'enable_beacon': ('enable_beacon', (name,)), 'disable_beacon': ('disable_beacon', (name,)), 'list': ('list_beacons', (include_opts, include_pillar)), 'list_available': ('list_available_beacons', ()), 'validate_beacon': ('validate_beacon', (name, beacon_data)), 'reset': ('reset', ())} # Call the appropriate beacon function try: alias, params = funcs.get(func) getattr(self.beacons, alias)(*params) except AttributeError: log.error('Function "%s" is unavailable in salt.beacons', func) except TypeError as exc: log.info( 'Failed to handle %s with data(%s). 
Error: %s', tag, data, exc, exc_info_on_loglevel=logging.DEBUG ) def environ_setenv(self, tag, data): ''' Set the salt-minion main process environment according to the data contained in the minion event data ''' environ = data.get('environ', None) if environ is None: return False false_unsets = data.get('false_unsets', False) clear_all = data.get('clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all) def _pre_tune(self): ''' Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running ''' if self._running is None: self._running = True elif self._running is False: log.error( 'This %s was scheduled to stop. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return elif self._running is True: log.error( 'This %s is already running. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return try: log.info( '%s is starting as user \'%s\'', self.__class__.__name__, salt.utils.user.get_user() ) except Exception as err: # Only windows is allowed to fail here. See #3189. Log as debug in # that case. Else, error. 
log.log( salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR, 'Failed to get the user who is starting %s', self.__class__.__name__, exc_info=err ) def _mine_send(self, tag, data): ''' Send mine data to the master ''' channel = salt.transport.client.ReqChannel.factory(self.opts) data['tok'] = self.tok try: ret = channel.send(data) return ret except SaltReqTimeoutError: log.warning('Unable to send mine data to master.') return None finally: channel.close() def _handle_tag_module_refresh(self, tag, data): ''' Handle a module_refresh event ''' self.module_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) @tornado.gen.coroutine def _handle_tag_pillar_refresh(self, tag, data): ''' Handle a pillar_refresh event ''' yield self.pillar_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) def _handle_tag_beacons_refresh(self, tag, data): ''' Handle a beacon_refresh event ''' self.beacons_refresh() def _handle_tag_matchers_refresh(self, tag, data): ''' Handle a matchers_refresh event ''' self.matchers_refresh() def _handle_tag_manage_schedule(self, tag, data): ''' Handle a manage_schedule event ''' self.manage_schedule(tag, data) def _handle_tag_manage_beacons(self, tag, data): ''' Handle a manage_beacons event ''' self.manage_beacons(tag, data) def _handle_tag_grains_refresh(self, tag, data): ''' Handle a grains_refresh event ''' if (data.get('force_refresh', False) or self.grains_cache != self.opts['grains']): self.pillar_refresh(force_refresh=True) self.grains_cache = self.opts['grains'] def _handle_tag_environ_setenv(self, tag, data): ''' Handle a environ_setenv event ''' self.environ_setenv(tag, data) def _handle_tag_minion_mine(self, tag, data): ''' Handle a _minion_mine event ''' self._mine_send(tag, data) def _handle_tag_fire_master(self, tag, data): ''' Handle a fire_master event ''' if self.connected: log.debug('Forwarding master event tag=%s', data['tag']) 
self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) def _handle_tag_master_disconnected_failback(self, tag, data): ''' Handle a master_disconnected_failback event ''' # if the master disconnect event is for a different master, raise an exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 
'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = 
True self.io_loop.stop() def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) def _handle_tag_salt_auth_creds(self, tag, data): ''' Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def handle_event(self, package): 
''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. ''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion.tune_in
python
def tune_in(self, start=True): ''' Lock onto the publisher. This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy()
Lock onto the publisher. This is the main event loop for the minion :rtype : None
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2811-L2883
[ "def enable_sigusr1_handler():\n '''\n Pretty print a stack trace to the console or a debug log under /tmp\n when any of the salt daemons such as salt-master are sent a SIGUSR1\n '''\n enable_sig_handler('SIGUSR1', _handle_sigusr1)\n # Also canonical BSD-way of printing progress is SIGINFO\n # which on BSD-derivatives can be sent via Ctrl+T\n enable_sig_handler('SIGINFO', _handle_sigusr1)\n", "def enable_ctrl_logoff_handler():\n if HAS_WIN32:\n ctrl_logoff_event = 5\n win32api.SetConsoleCtrlHandler(\n lambda event: True if event == ctrl_logoff_event else False,\n 1\n )\n", "def _state_run(self):\n '''\n Execute a state run based on information set in the minion config file\n '''\n if self.opts['startup_states']:\n if self.opts.get('master_type', 'str') == 'disable' and \\\n self.opts.get('file_client', 'remote') == 'remote':\n log.warning(\n 'Cannot run startup_states when \\'master_type\\' is set '\n 'to \\'disable\\' and \\'file_client\\' is set to '\n '\\'remote\\'. Skipping.'\n )\n else:\n data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}\n if self.opts['startup_states'] == 'sls':\n data['fun'] = 'state.sls'\n data['arg'] = [self.opts['sls_list']]\n elif self.opts['startup_states'] == 'top':\n data['fun'] = 'state.top'\n data['arg'] = [self.opts['top_file']]\n else:\n data['fun'] = 'state.highstate'\n data['arg'] = []\n self._handle_decoded_payload(data)\n", "def _fire_master_minion_start(self):\n # Send an event to the master that the minion is live\n if self.opts['enable_legacy_startup_events']:\n # Old style event. 
Defaults to False in Sodium release.\n self._fire_master(\n 'Minion {0} started at {1}'.format(\n self.opts['id'],\n time.asctime()\n ),\n 'minion_start'\n )\n # send name spaced event\n self._fire_master(\n 'Minion {0} started at {1}'.format(\n self.opts['id'],\n time.asctime()\n ),\n tagify([self.opts['id'], 'start'], 'minion'),\n )\n", "def _pre_tune(self):\n '''\n Set the minion running flag and issue the appropriate warnings if\n the minion cannot be started or is already running\n '''\n if self._running is None:\n self._running = True\n elif self._running is False:\n log.error(\n 'This %s was scheduled to stop. Not running %s.tune_in()',\n self.__class__.__name__, self.__class__.__name__\n )\n return\n elif self._running is True:\n log.error(\n 'This %s is already running. Not running %s.tune_in()',\n self.__class__.__name__, self.__class__.__name__\n )\n return\n\n try:\n log.info(\n '%s is starting as user \\'%s\\'',\n self.__class__.__name__, salt.utils.user.get_user()\n )\n except Exception as err:\n # Only windows is allowed to fail here. See #3189. Log as debug in\n # that case. 
Else, error.\n log.log(\n salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR,\n 'Failed to get the user who is starting %s',\n self.__class__.__name__,\n exc_info=err\n )\n", "def setup_beacons(self, before_connect=False):\n '''\n Set up the beacons.\n This is safe to call multiple times.\n '''\n self._setup_core()\n\n loop_interval = self.opts['loop_interval']\n new_periodic_callbacks = {}\n\n if 'beacons' not in self.periodic_callbacks:\n self.beacons = salt.beacons.Beacon(self.opts, self.functions)\n\n def handle_beacons():\n # Process Beacons\n beacons = None\n try:\n beacons = self.process_beacons(self.functions)\n except Exception:\n log.critical('The beacon errored: ', exc_info=True)\n if beacons and self.connected:\n self._fire_master(events=beacons)\n\n new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(\n handle_beacons, loop_interval * 1000)\n if before_connect:\n # Make sure there is a chance for one iteration to occur before connect\n handle_beacons()\n\n if 'cleanup' not in self.periodic_callbacks:\n new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(\n self._fallback_cleanups, loop_interval * 1000)\n\n # start all the other callbacks\n for periodic_cb in six.itervalues(new_periodic_callbacks):\n periodic_cb.start()\n\n self.periodic_callbacks.update(new_periodic_callbacks)\n", "def setup_scheduler(self, before_connect=False):\n '''\n Set up the scheduler.\n This is safe to call multiple times.\n '''\n self._setup_core()\n\n loop_interval = self.opts['loop_interval']\n new_periodic_callbacks = {}\n\n if 'schedule' not in self.periodic_callbacks:\n if 'schedule' not in self.opts:\n self.opts['schedule'] = {}\n if not hasattr(self, 'schedule'):\n self.schedule = salt.utils.schedule.Schedule(\n self.opts,\n self.functions,\n self.returners,\n utils=self.utils,\n cleanup=[master_event(type='alive')])\n\n try:\n if self.opts['grains_refresh_every']: # In minutes, not seconds!\n log.debug(\n 'Enabling the 
grains refresher. Will run every %d minute(s).',\n self.opts['grains_refresh_every']\n )\n self._refresh_grains_watcher(abs(self.opts['grains_refresh_every']))\n except Exception as exc:\n log.error(\n 'Exception occurred in attempt to initialize grain refresh '\n 'routine during minion tune-in: %s', exc\n )\n\n # TODO: actually listen to the return and change period\n def handle_schedule():\n self.process_schedule(self, loop_interval)\n new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000)\n\n if before_connect:\n # Make sure there is a chance for one iteration to occur before connect\n handle_schedule()\n\n if 'cleanup' not in self.periodic_callbacks:\n new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(\n self._fallback_cleanups, loop_interval * 1000)\n\n # start all the other callbacks\n for periodic_cb in six.itervalues(new_periodic_callbacks):\n periodic_cb.start()\n\n self.periodic_callbacks.update(new_periodic_callbacks)\n" ]
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, 
self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' 
mod_opts = {} for key, val in six.iteritems(self.opts): if key == 'logger': continue mod_opts[key] = val return mod_opts def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None): ''' Return the functions and the returners loaded up from the loader module ''' opt_in = True if not opts: opts = self.opts opt_in = False # if this is a *nix system AND modules_max_memory is set, lets enforce # a memory limit on module imports # this feature ONLY works on *nix like OSs (resource module doesn't work on windows) modules_max_memory = False if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE: log.debug( 'modules_max_memory set, enforcing a maximum of %s', opts['modules_max_memory'] ) modules_max_memory = True old_mem_limit = resource.getrlimit(resource.RLIMIT_AS) rss, vms = psutil.Process(os.getpid()).memory_info()[:2] mem_limit = rss + vms + opts['modules_max_memory'] resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit)) elif opts.get('modules_max_memory', -1) > 0: if not HAS_PSUTIL: log.error('Unable to enforce modules_max_memory because psutil is missing') if not HAS_RESOURCE: log.error('Unable to enforce modules_max_memory because resource is missing') # This might be a proxy minion if hasattr(self, 'proxy'): proxy = self.proxy else: proxy = None if grains is None: opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy) self.utils = salt.loader.utils(opts, proxy=proxy) if opts.get('multimaster', False): s_opts = copy.deepcopy(opts) functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy, loaded_base_name=self.loaded_base_name, notify=notify) else: functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy) returners = salt.loader.returners(opts, functions, proxy=proxy) errors = {} if '_errors' in functions: errors = functions['_errors'] functions.pop('_errors') # we're done, reset the limits! 
if modules_max_memory is True: resource.setrlimit(resource.RLIMIT_AS, old_mem_limit) executors = salt.loader.executors(opts, functions, proxy=proxy) if opt_in: self.opts = opts return functions, returners, errors, executors def _send_req_sync(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.ReqChannel.factory(self.opts) try: return channel.send(load, timeout=timeout) finally: channel.close() @tornado.gen.coroutine def _send_req_async(self, load, timeout): if self.opts['minion_sign_messages']: log.trace('Signing event to be published onto the bus.') minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem') sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load)) load['sig'] = sig channel = salt.transport.client.AsyncReqChannel.factory(self.opts) try: ret = yield channel.send(load, timeout=timeout) raise tornado.gen.Return(ret) finally: channel.close() def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None): ''' Fire an event on the master, or drop message if unable to send. ''' load = {'id': self.opts['id'], 'cmd': '_minion_event', 'pretag': pretag, 'tok': self.tok} if events: load['events'] = events elif data and tag: load['data'] = data load['tag'] = tag elif not data and tag: load['data'] = {} load['tag'] = tag else: return if sync: try: self._send_req_sync(load, timeout) except salt.exceptions.SaltReqTimeoutError: log.info('fire_master failed: master could not be contacted. 
Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return False except Exception: log.info('fire_master failed: %s', traceback.format_exc()) return False else: if timeout_handler is None: def handle_timeout(*_): log.info('fire_master failed: master could not be contacted. Request timed out.') # very likely one of the masters is dead, status.master will flush it self.functions['status.master'](self.opts['master']) return True timeout_handler = handle_timeout with tornado.stack_context.ExceptionStackContext(timeout_handler): self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg return True @tornado.gen.coroutine def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # Ensure payload is unicode. Disregard failure to decode binary blobs. if six.PY2: data = salt.utils.data.decode(data, keep=True) if 'user' in data: log.info( 'User %s Executing command %s with jid %s', data['user'], data['fun'], data['jid'] ) else: log.info( 'Executing command %s with jid %s', data['fun'], data['jid'] ) log.debug('Command details %s', data) # Don't duplicate jobs log.trace('Started JIDs: %s', self.jid_queue) if self.jid_queue is not None: if data['jid'] in self.jid_queue: return else: self.jid_queue.append(data['jid']) if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']: self.jid_queue.pop(0) if isinstance(data['fun'], six.string_types): if data['fun'] == 'sys.reload_modules': self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.schedule.functions = self.functions self.schedule.returners = self.returners process_count_max = self.opts.get('process_count_max') process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs') if process_count_max > 0: process_count = len(salt.utils.minion.running(self.opts)) while 
process_count >= process_count_max: log.warning('Maximum number of processes (%s) reached while ' 'executing jid %s, waiting %s seconds...', process_count_max, data['jid'], process_count_max_sleep_secs) yield tornado.gen.sleep(process_count_max_sleep_secs) process_count = len(salt.utils.minion.running(self.opts)) # We stash an instance references to allow for the socket # communication in Windows. You can't pickle functions, and thus # python needs to be able to reconstruct the reference on the other # side. instance = self multiprocessing_enabled = self.opts.get('multiprocessing', True) if multiprocessing_enabled: if sys.platform.startswith('win'): # let python reconstruct the minion on the other side if we're # running on windows instance = None with default_signals(signal.SIGINT, signal.SIGTERM): process = SignalHandlingMultiprocessingProcess( target=self._target, args=(instance, self.opts, data, self.connected) ) else: process = threading.Thread( target=self._target, args=(instance, self.opts, data, self.connected), name=data['jid'] ) if multiprocessing_enabled: with default_signals(signal.SIGINT, signal.SIGTERM): # Reset current signals before starting the process in # order not to inherit the current signal handlers process.start() else: process.start() # TODO: remove the windows specific check? 
if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading 
target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. 
Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
try: func_result = all(return_data.get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = salt.defaults.exitcodes.EX_GENERIC ret['retcode'] = retcode ret['success'] = retcode == salt.defaults.exitcodes.EX_OK except CommandNotFoundError as exc: msg = 'Command required for \'{0}\' not found'.format( function_name ) log.debug(msg, exc_info=True) ret['return'] = '{0}: {1}'.format(msg, exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except CommandExecutionError as exc: log.error( 'A command in \'%s\' had a problem: %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR: {0}'.format(exc) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except SaltInvocationError as exc: log.error( 'Problem executing \'%s\': %s', function_name, exc, exc_info_on_loglevel=logging.DEBUG ) ret['return'] = 'ERROR executing \'{0}\': {1}'.format( function_name, exc ) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except TypeError as exc: msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format( function_name, exc, func.__doc__ or '' ) log.warning(msg, exc_info_on_loglevel=logging.DEBUG) ret['return'] = msg ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC except Exception: msg = 'The minion function caused an exception' log.warning(msg, exc_info_on_loglevel=True) salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data) ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc()) ret['out'] = 'nested' ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC else: docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name)) if docs: docs[function_name] = minion_instance.functions.missing_fun_string(function_name) ret['return'] = docs else: ret['return'] = minion_instance.functions.missing_fun_string(function_name) mod_name = 
function_name.split('.')[0] if mod_name in minion_instance.function_errors: ret['return'] += ' Possible reasons: \'{0}\''.format( minion_instance.function_errors[mod_name] ) ret['success'] = False ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC ret['out'] = 'nested' ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'master_id' in data: ret['master_id'] = data['master_id'] if 'metadata' in data: if isinstance(data['metadata'], dict): ret['metadata'] = data['metadata'] else: log.warning('The metadata parameter must be a dictionary. Ignoring.') if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) # Add default returners from minion config # Should have been coverted to comma-delimited string already if isinstance(opts.get('return'), six.string_types): if data['ret']: data['ret'] = ','.join((data['ret'], opts['return'])) else: data['ret'] = opts['return'] log.debug('minion return: %s', ret) # TODO: make a list? Seems odd to split it this late :/ if data['ret'] and isinstance(data['ret'], six.string_types): if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] ret['id'] = opts['id'] for returner in set(data['ret'].split(',')): try: returner_str = '{0}.returner'.format(returner) if returner_str in minion_instance.returners: minion_instance.returners[returner_str](ret) else: returner_err = minion_instance.returners.missing_fun_string(returner_str) log.error( 'Returner %s could not be loaded: %s', returner_str, returner_err ) except Exception as exc: log.exception( 'The return failed for job %s: %s', data['jid'], exc ) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): ''' This method should be used as a threading target, start the actual minion side execution. 
''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job with PID %s', sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) multifunc_ordered = opts.get('multifunc_ordered', False) num_funcs = len(data['fun']) if multifunc_ordered: ret = { 'return': [None] * num_funcs, 'retcode': [None] * num_funcs, 'success': [False] * num_funcs } else: ret = { 'return': {}, 'retcode': {}, 'success': {} } for ind in range(0, num_funcs): if not multifunc_ordered: ret['success'][data['fun'][ind]] = False try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True elif minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. 
Only ' 'saltutil.refresh_pillar allowed in blackout mode.') func = minion_instance.functions[data['fun'][ind]] args, kwargs = load_args_and_kwargs( func, data['arg'][ind], data) minion_instance.functions.pack['__context__']['retcode'] = 0 key = ind if multifunc_ordered else data['fun'][ind] ret['return'][key] = func(*args, **kwargs) retcode = minion_instance.functions.pack['__context__'].get( 'retcode', 0 ) if retcode == 0: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. try: func_result = all(ret['return'][key].get(x, True) for x in ('result', 'success')) except Exception: # return data is not a dict func_result = True if not func_result: retcode = 1 ret['retcode'][key] = retcode ret['success'][key] = retcode == 0 except Exception as exc: trb = traceback.format_exc() log.warning('The minion function caused an exception: %s', exc) if multifunc_ordered: ret['return'][ind] = trb else: ret['return'][data['fun'][ind]] = trb ret['jid'] = data['jid'] ret['fun'] = data['fun'] ret['fun_args'] = data['arg'] if 'metadata' in data: ret['metadata'] = data['metadata'] if minion_instance.connected: minion_instance._return_pub( ret, timeout=minion_instance._return_retry_timer() ) if data['ret']: if 'ret_config' in data: ret['ret_config'] = data['ret_config'] if 'ret_kwargs' in data: ret['ret_kwargs'] = data['ret_kwargs'] for returner in set(data['ret'].split(',')): ret['id'] = opts['id'] try: minion_instance.returners['{0}.returner'.format( returner )](ret) except Exception as exc: log.error( 'The return failed for job %s: %s', data['jid'], exc ) def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file 
is gone already pass log.info('Returning information for job: %s', jid) log.trace('Return data: %s', ret) if ret_cmd == '_syndic_return': load = {'cmd': ret_cmd, 'id': self.opts['uid'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__')} if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] load['return'] = {} for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load = {'cmd': ret_cmd, 'id': self.opts['id']} for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled if ret['jid'] == 'req': ret['jid'] = salt.utils.jid.gen_jid(self.opts) salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret) if not self.opts['pub_ret']: return '' def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. 
If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True): ''' Return the data from the executed command to the master server ''' if not isinstance(rets, list): rets = [rets] jids = {} for ret in rets: jid = ret.get('jid', ret.get('__jid__')) fun = ret.get('fun', ret.get('__fun__')) if self.opts['multiprocessing']: fn_ = os.path.join(self.proc_dir, jid) if os.path.isfile(fn_): try: os.remove(fn_) except (OSError, IOError): # The file is gone already pass log.info('Returning information for job: %s', jid) load = jids.setdefault(jid, {}) if ret_cmd == '_syndic_return': if not load: load.update({'id': self.opts['id'], 'jid': jid, 'fun': fun, 'arg': ret.get('arg'), 'tgt': ret.get('tgt'), 'tgt_type': ret.get('tgt_type'), 'load': ret.get('__load__'), 'return': {}}) if '__master_id__' in ret: load['master_id'] = ret['__master_id__'] for key, value in six.iteritems(ret): if key.startswith('__'): continue load['return'][key] = value else: load.update({'id': self.opts['id']}) for key, value in six.iteritems(ret): load[key] = value if 'out' in ret: if isinstance(ret['out'], six.string_types): load['out'] = ret['out'] else: log.error( 'Invalid outputter %s. 
This is likely a bug.', ret['out'] ) else: try: oput = self.functions[fun].__outputter__ except (KeyError, AttributeError, TypeError): pass else: if isinstance(oput, six.string_types): load['out'] = oput if self.opts['cache_jobs']: # Local job cache has been enabled salt.utils.minion.cache_jobs(self.opts, load['jid'], ret) load = {'cmd': ret_cmd, 'load': list(six.itervalues(jids))} def timeout_handler(*_): log.warning( 'The minion failed to return the job information for job %s. ' 'This is often due to the master being shut down or ' 'overloaded. If the master is running, consider increasing ' 'the worker_threads value.', jid ) return True if sync: try: ret_val = self._send_req_sync(load, timeout=timeout) except SaltReqTimeoutError: timeout_handler() return '' else: with tornado.stack_context.ExceptionStackContext(timeout_handler): ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg log.trace('ret_val = %s', ret_val) # pylint: disable=no-member return ret_val def _state_run(self): ''' Execute a state run based on information set in the minion config file ''' if self.opts['startup_states']: if self.opts.get('master_type', 'str') == 'disable' and \ self.opts.get('file_client', 'remote') == 'remote': log.warning( 'Cannot run startup_states when \'master_type\' is set ' 'to \'disable\' and \'file_client\' is set to ' '\'remote\'. Skipping.' 
) else: data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')} if self.opts['startup_states'] == 'sls': data['fun'] = 'state.sls' data['arg'] = [self.opts['sls_list']] elif self.opts['startup_states'] == 'top': data['fun'] = 'state.top' data['arg'] = [self.opts['top_file']] else: data['fun'] = 'state.highstate' data['arg'] = [] self._handle_decoded_payload(data) def _refresh_grains_watcher(self, refresh_interval_in_minutes): ''' Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion :param refresh_interval_in_minutes: :return: None ''' if '__update_grains' not in self.opts.get('schedule', {}): if 'schedule' not in self.opts: self.opts['schedule'] = {} self.opts['schedule'].update({ '__update_grains': { 'function': 'event.fire', 'args': [{}, 'grains_refresh'], 'minutes': refresh_interval_in_minutes } }) def _fire_master_minion_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to False in Sodium release. self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'minion_start' ) # send name spaced event self._fire_master( 'Minion {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'minion'), ) def module_refresh(self, force_refresh=False, notify=False): ''' Refresh the functions and returners. ''' log.debug('Refreshing modules. Notify=%s', notify) self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify) self.schedule.functions = self.functions self.schedule.returners = self.returners def beacons_refresh(self): ''' Refresh the functions and returners. 
''' log.debug('Refreshing beacons.') self.beacons = salt.beacons.Beacon(self.opts, self.functions) def matchers_refresh(self): ''' Refresh the matchers ''' log.debug('Refreshing matchers.') self.matchers = salt.loader.matchers(self.opts) # TODO: only allow one future in flight at a time? @tornado.gen.coroutine def pillar_refresh(self, force_refresh=False, notify=False): ''' Refresh the pillar ''' if self.connected: log.debug('Refreshing pillar. Notify: %s', notify) async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ) try: self.opts['pillar'] = yield async_pillar.compile_pillar() if notify: evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False) evt.fire_event({'complete': True}, tag=salt.defaults.events.MINION_PILLAR_COMPLETE) except SaltClientError: # Do not exit if a pillar refresh fails. log.error('Pillar data could not be refreshed. ' 'One or more masters may be down!') finally: async_pillar.destroy() self.module_refresh(force_refresh, notify) self.matchers_refresh() self.beacons_refresh() def manage_schedule(self, tag, data): ''' Refresh the functions and returners. 
''' func = data.get('func', None) name = data.get('name', None) schedule = data.get('schedule', None) where = data.get('where', None) persist = data.get('persist', None) funcs = {'delete': ('delete_job', (name, persist)), 'add': ('add_job', (schedule, persist)), 'modify': ('modify_job', (name, schedule, persist)), 'enable': ('enable_schedule', ()), 'disable': ('disable_schedule', ()), 'enable_job': ('enable_job', (name, persist)), 'disable_job': ('disable_job', (name, persist)), 'postpone_job': ('postpone_job', (name, data)), 'skip_job': ('skip_job', (name, data)), 'reload': ('reload', (schedule,)), 'list': ('list', (where,)), 'save_schedule': ('save_schedule', ()), 'get_next_fire_time': ('get_next_fire_time', (name,))} # Call the appropriate schedule function try: alias, params = funcs.get(func) getattr(self.schedule, alias)(*params) except TypeError: log.error('Function "%s" is unavailable in salt.utils.scheduler', func) def manage_beacons(self, tag, data): ''' Manage Beacons ''' func = data.get('func', None) name = data.get('name', None) beacon_data = data.get('beacon_data', None) include_pillar = data.get('include_pillar', None) include_opts = data.get('include_opts', None) funcs = {'add': ('add_beacon', (name, beacon_data)), 'modify': ('modify_beacon', (name, beacon_data)), 'delete': ('delete_beacon', (name,)), 'enable': ('enable_beacons', ()), 'disable': ('disable_beacons', ()), 'enable_beacon': ('enable_beacon', (name,)), 'disable_beacon': ('disable_beacon', (name,)), 'list': ('list_beacons', (include_opts, include_pillar)), 'list_available': ('list_available_beacons', ()), 'validate_beacon': ('validate_beacon', (name, beacon_data)), 'reset': ('reset', ())} # Call the appropriate beacon function try: alias, params = funcs.get(func) getattr(self.beacons, alias)(*params) except AttributeError: log.error('Function "%s" is unavailable in salt.beacons', func) except TypeError as exc: log.info( 'Failed to handle %s with data(%s). 
Error: %s', tag, data, exc, exc_info_on_loglevel=logging.DEBUG ) def environ_setenv(self, tag, data): ''' Set the salt-minion main process environment according to the data contained in the minion event data ''' environ = data.get('environ', None) if environ is None: return False false_unsets = data.get('false_unsets', False) clear_all = data.get('clear_all', False) import salt.modules.environ as mod_environ return mod_environ.setenv(environ, false_unsets, clear_all) def _pre_tune(self): ''' Set the minion running flag and issue the appropriate warnings if the minion cannot be started or is already running ''' if self._running is None: self._running = True elif self._running is False: log.error( 'This %s was scheduled to stop. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return elif self._running is True: log.error( 'This %s is already running. Not running %s.tune_in()', self.__class__.__name__, self.__class__.__name__ ) return try: log.info( '%s is starting as user \'%s\'', self.__class__.__name__, salt.utils.user.get_user() ) except Exception as err: # Only windows is allowed to fail here. See #3189. Log as debug in # that case. Else, error. 
log.log( salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR, 'Failed to get the user who is starting %s', self.__class__.__name__, exc_info=err ) def _mine_send(self, tag, data): ''' Send mine data to the master ''' channel = salt.transport.client.ReqChannel.factory(self.opts) data['tok'] = self.tok try: ret = channel.send(data) return ret except SaltReqTimeoutError: log.warning('Unable to send mine data to master.') return None finally: channel.close() def _handle_tag_module_refresh(self, tag, data): ''' Handle a module_refresh event ''' self.module_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) @tornado.gen.coroutine def _handle_tag_pillar_refresh(self, tag, data): ''' Handle a pillar_refresh event ''' yield self.pillar_refresh( force_refresh=data.get('force_refresh', False), notify=data.get('notify', False) ) def _handle_tag_beacons_refresh(self, tag, data): ''' Handle a beacon_refresh event ''' self.beacons_refresh() def _handle_tag_matchers_refresh(self, tag, data): ''' Handle a matchers_refresh event ''' self.matchers_refresh() def _handle_tag_manage_schedule(self, tag, data): ''' Handle a manage_schedule event ''' self.manage_schedule(tag, data) def _handle_tag_manage_beacons(self, tag, data): ''' Handle a manage_beacons event ''' self.manage_beacons(tag, data) def _handle_tag_grains_refresh(self, tag, data): ''' Handle a grains_refresh event ''' if (data.get('force_refresh', False) or self.grains_cache != self.opts['grains']): self.pillar_refresh(force_refresh=True) self.grains_cache = self.opts['grains'] def _handle_tag_environ_setenv(self, tag, data): ''' Handle a environ_setenv event ''' self.environ_setenv(tag, data) def _handle_tag_minion_mine(self, tag, data): ''' Handle a _minion_mine event ''' self._mine_send(tag, data) def _handle_tag_fire_master(self, tag, data): ''' Handle a fire_master event ''' if self.connected: log.debug('Forwarding master event tag=%s', data['tag']) 
self._fire_master(data['data'], data['tag'], data['events'], data['pretag']) def _handle_tag_master_disconnected_failback(self, tag, data): ''' Handle a master_disconnected_failback event ''' # if the master disconnect event is for a different master, raise an exception if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']: # not mine master, ignore return if tag.startswith(master_event(type='failback')): # if the master failback event is not for the top master, raise an exception if data['master'] != self.opts['master_list'][0]: raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format( data['master'], self.opts['master'])) # if the master failback event is for the current master, raise an exception elif data['master'] == self.opts['master'][0]: raise SaltException('Already connected to \'{0}\''.format(data['master'])) if self.connected: # we are not connected anymore self.connected = False log.info('Connection to master %s lost', self.opts['master']) # we can't use the config default here because the default '0' value is overloaded # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up # these jobs master_alive_interval = self.opts['master_alive_interval'] or 60 if self.opts['master_type'] != 'failover': # modify the scheduled job to fire on reconnect if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': False} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: # delete the scheduled job to don't interfere with the failover process if self.opts['transport'] != 'tcp': self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) log.info('Trying to tune in to next master from master-list') if hasattr(self, 
'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'auth'): self.pub_channel.auth.invalidate() if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication try: master, self.pub_channel = yield self.eval_master( opts=self.opts, failed=True, failback=tag.startswith(master_event(type='failback'))) except SaltClientError: pass if self.connected: self.opts['master'] = master # re-init the subsystems to work with the new master log.info( 'Re-initialising subsystems for new master %s', self.opts['master'] ) # put the current schedule into the new loaders self.opts['schedule'] = self.schedule.option('schedule') self.functions, self.returners, self.function_errors, self.executors = self._load_modules() # make the schedule to use the new 'functions' loader self.schedule.functions = self.functions self.pub_channel.on_recv(self._handle_payload) self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # update scheduled job to run with the new master addr if self.opts['transport'] != 'tcp': schedule = { 'function': 'status.master', 'seconds': master_alive_interval, 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) if self.opts['master_failback'] and 'master_list' in self.opts: if self.opts['master'] != self.opts['master_list'][0]: schedule = { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } self.schedule.modify_job(name=master_event(type='failback'), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='failback'), persist=True) else: self.restart = 
True self.io_loop.stop() def _handle_tag_master_connected(self, tag, data): ''' Handle a master_connected event ''' # handle this event only once. otherwise it will pollute the log # also if master type is failover all the reconnection work is done # by `disconnected` event handler and this event must never happen, # anyway check it to be sure if not self.connected and self.opts['master_type'] != 'failover': log.info('Connection to master %s re-established', self.opts['master']) self.connected = True # modify the __master_alive job to only fire, # if the connection is lost again if self.opts['transport'] != 'tcp': if self.opts['master_alive_interval'] > 0: schedule = { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']), schedule=schedule) else: self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True) def _handle_tag_schedule_return(self, tag, data): ''' Handle a _schedule_return event ''' # reporting current connection with master if data['schedule'].startswith(master_event(type='alive', master='')): if data['return']: log.debug( 'Connected to master %s', data['schedule'].split(master_event(type='alive', master=''))[1] ) self._return_pub(data, ret_cmd='_return', sync=False) def _handle_tag_salt_error(self, tag, data): ''' Handle a _salt_error event ''' if self.connected: log.debug('Forwarding salt error event tag=%s', tag) self._fire_master(data, tag) def _handle_tag_salt_auth_creds(self, tag, data): ''' Handle a salt_auth_creds event ''' key = tuple(data['key']) log.debug( 'Updating auth data for %s: %s -> %s', key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds'] ) salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds'] @tornado.gen.coroutine def handle_event(self, package): 
''' Handle an event from the epull_sock (all local minion events) ''' if not self.ready: raise tornado.gen.Return() tag, data = salt.utils.event.SaltEvent.unpack(package) log.debug( 'Minion of \'%s\' is handling event tag \'%s\'', self.opts['master'], tag ) tag_functions = { 'beacons_refresh': self._handle_tag_beacons_refresh, 'environ_setenv': self._handle_tag_environ_setenv, 'fire_master': self._handle_tag_fire_master, 'grains_refresh': self._handle_tag_grains_refresh, 'matchers_refresh': self._handle_tag_matchers_refresh, 'manage_schedule': self._handle_tag_manage_schedule, 'manage_beacons': self._handle_tag_manage_beacons, '_minion_mine': self._handle_tag_minion_mine, 'module_refresh': self._handle_tag_module_refresh, 'pillar_refresh': self._handle_tag_pillar_refresh, 'salt/auth/creds': self._handle_tag_salt_auth_creds, '_salt_error': self._handle_tag_salt_error, '__schedule_return': self._handle_tag_schedule_return, master_event(type='disconnected'): self._handle_tag_master_disconnected_failback, master_event(type='failback'): self._handle_tag_master_disconnected_failback, master_event(type='connected'): self._handle_tag_master_connected, } # Run the appropriate function for tag_function in tag_functions: if tag.startswith(tag_function): tag_functions[tag_function](tag, data) def _fallback_cleanups(self): ''' Fallback cleanup routines, attempting to fix leaked processes, threads, etc. ''' # Add an extra fallback in case a forked process leaks through multiprocessing.active_children() # Cleanup Windows threads if not salt.utils.platform.is_windows(): return for thread in self.win_proc: if not thread.is_alive(): thread.join() try: self.win_proc.remove(thread) del thread except (ValueError, NameError): pass def _setup_core(self): ''' Set up the core minion attributes. This is safe to call multiple times. ''' if not self.ready: # First call. Initialize. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. 
def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop() def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Minion.destroy
python
def destroy(self): ''' Tear down the minion ''' if self._running is False: return self._running = False if hasattr(self, 'schedule'): del self.schedule if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel if hasattr(self, 'periodic_callbacks'): for cb in six.itervalues(self.periodic_callbacks): cb.stop()
Tear down the minion
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2928-L2945
[ "def itervalues(d, **kw):\n return d.itervalues(**kw)\n" ]
class Minion(MinionBase): ''' This class instantiates a minion, runs connections for a minion, and loads all of the functions into the minion ''' def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231 ''' Pass in the options dict ''' # this means that the parent class doesn't know *which* master we connect to super(Minion, self).__init__(opts) self.timeout = timeout self.safe = safe self._running = None self.win_proc = [] self.loaded_base_name = loaded_base_name self.connected = False self.restart = False # Flag meaning minion has finished initialization including first connect to the master. # True means the Minion is fully functional and ready to handle events. self.ready = False self.jid_queue = [] if jid_queue is None else jid_queue self.periodic_callbacks = {} if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # Warn if ZMQ < 3.2 if zmq: if ZMQ_VERSION_INFO < (3, 2): log.warning( 'You have a version of ZMQ less than ZMQ 3.2! There are ' 'known connection keep-alive issues with ZMQ < 3.2 which ' 'may result in loss of contact with minions. Please ' 'upgrade your ZMQ!' ) # Late setup of the opts grains, so we can log from the grains # module. If this is a proxy, however, we need to init the proxymodule # before we can get the grains. We do this for proxies in the # post_master_init if not salt.utils.platform.is_proxy(): self.opts['grains'] = salt.loader.grains(opts) else: if self.opts.get('beacons_before_connect', False): log.warning( '\'beacons_before_connect\' is not supported ' 'for proxy minions. Setting to False' ) self.opts['beacons_before_connect'] = False if self.opts.get('scheduler_before_connect', False): log.warning( '\'scheduler_before_connect\' is not supported ' 'for proxy minions. 
Setting to False' ) self.opts['scheduler_before_connect'] = False log.info('Creating minion process manager') if self.opts['random_startup_delay']: sleep_time = random.randint(0, self.opts['random_startup_delay']) log.info( 'Minion sleeping for %s seconds due to configured ' 'startup_delay between 0 and %s seconds', sleep_time, self.opts['random_startup_delay'] ) time.sleep(sleep_time) self.process_manager = ProcessManager(name='MinionProcessManager') self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # We don't have the proxy setup yet, so we can't start engines # Engines need to be able to access __proxy__ if not salt.utils.platform.is_proxy(): self.io_loop.spawn_callback(salt.engines.start_engines, self.opts, self.process_manager) # Install the SIGINT/SIGTERM handlers if not done so far if signal.getsignal(signal.SIGINT) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGINT, self._handle_signals) if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL: # No custom signal handling was added, install our own signal.signal(signal.SIGTERM, self._handle_signals) def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument self._running = False # escalate the signals to the process manager self.process_manager.stop_restarting() self.process_manager.send_signal_to_processes(signum) # kill any remaining processes self.process_manager.kill_children() time.sleep(1) sys.exit(0) def sync_connect_master(self, timeout=None, failed=False): ''' Block until we are connected to a master ''' self._sync_connect_master_success = False log.debug("sync_connect_master") def on_connect_master_future_done(future): self._sync_connect_master_success = True self.io_loop.stop() self._connect_master_future = self.connect_master(failed=failed) # finish connecting to master self._connect_master_future.add_done_callback(on_connect_master_future_done) if timeout: self.io_loop.call_later(timeout, 
self.io_loop.stop) try: self.io_loop.start() except KeyboardInterrupt: self.destroy() # I made the following 3 line oddity to preserve traceback. # Please read PR #23978 before changing, hopefully avoiding regressions. # Good luck, we're all counting on you. Thanks. if self._connect_master_future.done(): future_exception = self._connect_master_future.exception() if future_exception: # This needs to be re-raised to preserve restart_on_error behavior. raise six.reraise(*future_exception) if timeout and self._sync_connect_master_success is False: raise SaltDaemonNotRunning('Failed to connect to the salt-master') @tornado.gen.coroutine def connect_master(self, failed=False): ''' Return a future which will complete when you are connected to a master ''' master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed) yield self._post_master_init(master) # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check ProxyMinion._post_master_init to see if those changes need to be propagated. Minions and ProxyMinions need significantly different post master setups, which is why the differences are not factored out into separate helper functions. ''' if self.connected: self.opts['master'] = master # Initialize pillar before loader to make pillar accessible in modules async_pillar = salt.pillar.get_async_pillar( self.opts, self.opts['grains'], self.opts['id'], self.opts['saltenv'], pillarenv=self.opts.get('pillarenv') ) self.opts['pillar'] = yield async_pillar.compile_pillar() async_pillar.destroy() if not self.ready: self._setup_core() elif self.connected and self.opts['pillar']: # The pillar has changed due to the connection to the master. # Reload the functions so that they can use the new pillar data. 
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() if hasattr(self, 'schedule'): self.schedule.functions = self.functions self.schedule.returners = self.returners if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, cleanup=[master_event(type='alive')]) # add default scheduling jobs to the minions scheduler if self.opts['mine_enabled'] and 'mine.update' in self.functions: self.schedule.add_job({ '__mine_interval': { 'function': 'mine.update', 'minutes': self.opts['mine_interval'], 'jid_include': True, 'maxrunning': 2, 'run_on_start': True, 'return_job': self.opts.get('mine_return_job', False) } }, persist=True) log.info('Added mine.update to scheduler') else: self.schedule.delete_job('__mine_interval', persist=True) # add master_alive job if enabled if (self.opts['transport'] != 'tcp' and self.opts['master_alive_interval'] > 0 and self.connected): self.schedule.add_job({ master_event(type='alive', master=self.opts['master']): { 'function': 'status.master', 'seconds': self.opts['master_alive_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master'], 'connected': True} } }, persist=True) if self.opts['master_failback'] and \ 'master_list' in self.opts and \ self.opts['master'] != self.opts['master_list'][0]: self.schedule.add_job({ master_event(type='failback'): { 'function': 'status.ping_master', 'seconds': self.opts['master_failback_interval'], 'jid_include': True, 'maxrunning': 1, 'return_job': False, 'kwargs': {'master': self.opts['master_list'][0]} } }, persist=True) else: self.schedule.delete_job(master_event(type='failback'), persist=True) else: self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True) self.schedule.delete_job(master_event(type='failback'), persist=True) def _prep_mod_opts(self): ''' Returns a copy of the opts with key bits stripped out ''' 
        # Tail of the enclosing helper (its ``def`` is above this chunk):
        # build a shallow copy of opts for module consumption, dropping the
        # non-serializable logger entry.
        mod_opts = {}
        for key, val in six.iteritems(self.opts):
            if key == 'logger':
                continue
            mod_opts[key] = val
        return mod_opts

    def _load_modules(self, force_refresh=False, notify=False, grains=None, opts=None):
        '''
        Return the functions and the returners loaded up from the loader
        module.

        :param bool force_refresh: passed through to the grains/loader layer to
            force a re-scan instead of using cached data
        :param bool notify: passed to the loader so it can fire a refresh event
        :param grains: if not ``None``, skip regenerating grains
        :param opts: alternative opts dict; when supplied it replaces
            ``self.opts`` after loading (``opt_in`` below)
        :return: tuple of (functions, returners, errors, executors)
        '''
        opt_in = True
        if not opts:
            opts = self.opts
            opt_in = False

        # if this is a *nix system AND modules_max_memory is set, lets enforce
        # a memory limit on module imports
        # this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
        modules_max_memory = False
        if opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
            log.debug(
                'modules_max_memory set, enforcing a maximum of %s',
                opts['modules_max_memory']
            )
            modules_max_memory = True
            old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
            rss, vms = psutil.Process(os.getpid()).memory_info()[:2]
            # cap the address space at current usage plus the configured slack
            mem_limit = rss + vms + opts['modules_max_memory']
            resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
        elif opts.get('modules_max_memory', -1) > 0:
            if not HAS_PSUTIL:
                log.error('Unable to enforce modules_max_memory because psutil is missing')
            if not HAS_RESOURCE:
                log.error('Unable to enforce modules_max_memory because resource is missing')

        # This might be a proxy minion
        if hasattr(self, 'proxy'):
            proxy = self.proxy
        else:
            proxy = None

        if grains is None:
            opts['grains'] = salt.loader.grains(opts, force_refresh, proxy=proxy)
        self.utils = salt.loader.utils(opts, proxy=proxy)

        if opts.get('multimaster', False):
            # deep-copy so per-master loaders do not share mutable opts
            s_opts = copy.deepcopy(opts)
            functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy,
                                                loaded_base_name=self.loaded_base_name, notify=notify)
        else:
            functions = salt.loader.minion_mods(opts, utils=self.utils, notify=notify, proxy=proxy)
        returners = salt.loader.returners(opts, functions, proxy=proxy)
        errors = {}
        if '_errors' in functions:
            errors = functions['_errors']
            functions.pop('_errors')

        # we're done, reset the limits!
        if modules_max_memory is True:
            resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)

        executors = salt.loader.executors(opts, functions, proxy=proxy)

        if opt_in:
            self.opts = opts

        return functions, returners, errors, executors

    def _send_req_sync(self, load, timeout):
        '''
        Synchronously send ``load`` to the master over a ReqChannel,
        optionally signing it first, and return the reply.
        '''
        if self.opts['minion_sign_messages']:
            log.trace('Signing event to be published onto the bus.')
            minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
            sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
            load['sig'] = sig

        channel = salt.transport.client.ReqChannel.factory(self.opts)
        try:
            return channel.send(load, timeout=timeout)
        finally:
            # always release the channel, even on timeout/error
            channel.close()

    @tornado.gen.coroutine
    def _send_req_async(self, load, timeout):
        '''
        Asynchronous counterpart of :meth:`_send_req_sync`; yields the reply
        via ``tornado.gen.Return``.
        '''
        if self.opts['minion_sign_messages']:
            log.trace('Signing event to be published onto the bus.')
            minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
            sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
            load['sig'] = sig

        channel = salt.transport.client.AsyncReqChannel.factory(self.opts)
        try:
            ret = yield channel.send(load, timeout=timeout)
            raise tornado.gen.Return(ret)
        finally:
            channel.close()

    def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True,
                     timeout_handler=None):
        '''
        Fire an event on the master, or drop message if unable to send.
        '''
        load = {'id': self.opts['id'],
                'cmd': '_minion_event',
                'pretag': pretag,
                'tok': self.tok}
        if events:
            load['events'] = events
        elif data and tag:
            load['data'] = data
            load['tag'] = tag
        elif not data and tag:
            load['data'] = {}
            load['tag'] = tag
        else:
            # nothing to publish (no events and no tag)
            return

        if sync:
            try:
                self._send_req_sync(load, timeout)
            except salt.exceptions.SaltReqTimeoutError:
                log.info('fire_master failed: master could not be contacted. Request timed out.')
                # very likely one of the masters is dead, status.master will flush it
                self.functions['status.master'](self.opts['master'])
                return False
            except Exception:
                log.info('fire_master failed: %s', traceback.format_exc())
                return False
        else:
            if timeout_handler is None:
                def handle_timeout(*_):
                    log.info('fire_master failed: master could not be contacted. Request timed out.')
                    # very likely one of the masters is dead, status.master will flush it
                    self.functions['status.master'](self.opts['master'])
                    return True
                timeout_handler = handle_timeout

            with tornado.stack_context.ExceptionStackContext(timeout_handler):
                # pylint: disable=unexpected-keyword-arg
                self._send_req_async(load, timeout, callback=lambda f: None)
        return True

    @tornado.gen.coroutine
    def _handle_decoded_payload(self, data):
        '''
        Override this method if you wish to handle the decoded data
        differently.
        '''
        # Ensure payload is unicode. Disregard failure to decode binary blobs.
        if six.PY2:
            data = salt.utils.data.decode(data, keep=True)
        if 'user' in data:
            log.info(
                'User %s Executing command %s with jid %s',
                data['user'], data['fun'], data['jid']
            )
        else:
            log.info(
                'Executing command %s with jid %s',
                data['fun'], data['jid']
            )
        log.debug('Command details %s', data)

        # Don't duplicate jobs
        log.trace('Started JIDs: %s', self.jid_queue)
        if self.jid_queue is not None:
            if data['jid'] in self.jid_queue:
                return
            else:
                self.jid_queue.append(data['jid'])
                # bounded dedup window: drop the oldest jid past the high-water mark
                if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']:
                    self.jid_queue.pop(0)

        if isinstance(data['fun'], six.string_types):
            if data['fun'] == 'sys.reload_modules':
                self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
                self.schedule.functions = self.functions
                self.schedule.returners = self.returners

        process_count_max = self.opts.get('process_count_max')
        process_count_max_sleep_secs = self.opts.get('process_count_max_sleep_secs')
        if process_count_max > 0:
            # throttle job spawning while too many job processes are running
            process_count = len(salt.utils.minion.running(self.opts))
            while process_count >= process_count_max:
                log.warning('Maximum number of processes (%s) reached while '
                            'executing jid %s, waiting %s seconds...',
                            process_count_max,
                            data['jid'],
                            process_count_max_sleep_secs)
                yield tornado.gen.sleep(process_count_max_sleep_secs)
                process_count = len(salt.utils.minion.running(self.opts))

        # We stash an instance references to allow for the socket
        # communication in Windows. You can't pickle functions, and thus
        # python needs to be able to reconstruct the reference on the other
        # side.
        instance = self
        multiprocessing_enabled = self.opts.get('multiprocessing', True)
        if multiprocessing_enabled:
            if sys.platform.startswith('win'):
                # let python reconstruct the minion on the other side if we're
                # running on windows
                instance = None
            with default_signals(signal.SIGINT, signal.SIGTERM):
                process = SignalHandlingMultiprocessingProcess(
                    target=self._target, args=(instance, self.opts, data, self.connected)
                )
        else:
            process = threading.Thread(
                target=self._target,
                args=(instance, self.opts, data, self.connected),
                name=data['jid']
            )

        if multiprocessing_enabled:
            with default_signals(signal.SIGINT, signal.SIGTERM):
                # Reset current signals before starting the process in
                # order not to inherit the current signal handlers
                process.start()
        else:
            process.start()

        # TODO: remove the windows specific check?
if multiprocessing_enabled and not salt.utils.platform.is_windows(): # we only want to join() immediately if we are daemonizing a process process.join() elif salt.utils.platform.is_windows(): self.win_proc.append(process) def ctx(self): ''' Return a single context manager for the minion's data ''' if six.PY2: return contextlib.nested( self.functions.context_dict.clone(), self.returners.context_dict.clone(), self.executors.context_dict.clone(), ) else: exitstack = contextlib.ExitStack() exitstack.enter_context(self.functions.context_dict.clone()) exitstack.enter_context(self.returners.context_dict.clone()) exitstack.enter_context(self.executors.context_dict.clone()) return exitstack @classmethod def _target(cls, minion_instance, opts, data, connected): if not minion_instance: minion_instance = cls(opts) minion_instance.connected = connected if not hasattr(minion_instance, 'functions'): functions, returners, function_errors, executors = ( minion_instance._load_modules(grains=opts['grains']) ) minion_instance.functions = functions minion_instance.returners = returners minion_instance.function_errors = function_errors minion_instance.executors = executors if not hasattr(minion_instance, 'serial'): minion_instance.serial = salt.payload.Serial(opts) if not hasattr(minion_instance, 'proc_dir'): uid = salt.utils.user.get_uid(user=opts.get('user', None)) minion_instance.proc_dir = ( get_proc_dir(opts['cachedir'], uid=uid) ) def run_func(minion_instance, opts, data): if isinstance(data['fun'], tuple) or isinstance(data['fun'], list): return Minion._thread_multi_return(minion_instance, opts, data) else: return Minion._thread_return(minion_instance, opts, data) with tornado.stack_context.StackContext(functools.partial(RequestContext, {'data': data, 'opts': opts})): with tornado.stack_context.StackContext(minion_instance.ctx): run_func(minion_instance, opts, data) @classmethod def _thread_return(cls, minion_instance, opts, data): ''' This method should be used as a threading 
target, start the actual minion side execution. ''' fn_ = os.path.join(minion_instance.proc_dir, data['jid']) if opts['multiprocessing'] and not salt.utils.platform.is_windows(): # Shutdown the multiprocessing before daemonizing salt.log.setup.shutdown_multiprocessing_logging() salt.utils.process.daemonize_if(opts) # Reconfigure multiprocessing logging after daemonizing salt.log.setup.setup_multiprocessing_logging() salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid'])) sdata = {'pid': os.getpid()} sdata.update(data) log.info('Starting a new job %s with PID %s', data['jid'], sdata['pid']) with salt.utils.files.fopen(fn_, 'w+b') as fp_: fp_.write(minion_instance.serial.dumps(sdata)) ret = {'success': False} function_name = data['fun'] executors = data.get('module_executors') or \ getattr(minion_instance, 'module_executors', []) or \ opts.get('module_executors', ['direct_call']) allow_missing_funcs = any([ minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name) for executor in executors if '{0}.allow_missing_func' in minion_instance.executors ]) if function_name in minion_instance.functions or allow_missing_funcs is True: try: minion_blackout_violation = False if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False): whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', []) # this minion is blacked out. 
Only allow saltutil.refresh_pillar and the whitelist if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True # use minion_blackout_whitelist from grains if it exists if minion_instance.opts['grains'].get('minion_blackout', False): whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', []) if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist: minion_blackout_violation = True if minion_blackout_violation: raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' ' 'to False in pillar or grains to resume operations. Only ' 'saltutil.refresh_pillar allowed in blackout mode.') if function_name in minion_instance.functions: func = minion_instance.functions[function_name] args, kwargs = load_args_and_kwargs( func, data['arg'], data) else: # only run if function_name is not in minion_instance.functions and allow_missing_funcs is True func = function_name args, kwargs = data['arg'], data minion_instance.functions.pack['__context__']['retcode'] = 0 if isinstance(executors, six.string_types): executors = [executors] elif not isinstance(executors, list) or not executors: raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected". 
format(executors)) if opts.get('sudo_user', '') and executors[-1] != 'sudo': executors[-1] = 'sudo' # replace the last one with sudo log.trace('Executors list %s', executors) # pylint: disable=no-member for name in executors: fname = '{0}.execute'.format(name) if fname not in minion_instance.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = minion_instance.executors[fname](opts, data, func, args, kwargs) if return_data is not None: break if isinstance(return_data, types.GeneratorType): ind = 0 iret = {} for single in return_data: if isinstance(single, dict) and isinstance(iret, dict): iret.update(single) else: if not iret: iret = [] iret.append(single) tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job') event_data = {'return': single} minion_instance._fire_master(event_data, tag) ind += 1 ret['return'] = iret else: ret['return'] = return_data retcode = minion_instance.functions.pack['__context__'].get( 'retcode', salt.defaults.exitcodes.EX_OK ) if retcode == salt.defaults.exitcodes.EX_OK: # No nonzero retcode in __context__ dunder. Check if return # is a dictionary with a "result" or "success" key. 
                    try:
                        func_result = all(return_data.get(x, True)
                                          for x in ('result', 'success'))
                    except Exception:
                        # return data is not a dict
                        func_result = True
                    if not func_result:
                        retcode = salt.defaults.exitcodes.EX_GENERIC

                ret['retcode'] = retcode
                ret['success'] = retcode == salt.defaults.exitcodes.EX_OK
            except CommandNotFoundError as exc:
                msg = 'Command required for \'{0}\' not found'.format(
                    function_name
                )
                log.debug(msg, exc_info=True)
                ret['return'] = '{0}: {1}'.format(msg, exc)
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            except CommandExecutionError as exc:
                log.error(
                    'A command in \'%s\' had a problem: %s',
                    function_name, exc,
                    exc_info_on_loglevel=logging.DEBUG
                )
                ret['return'] = 'ERROR: {0}'.format(exc)
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            except SaltInvocationError as exc:
                log.error(
                    'Problem executing \'%s\': %s',
                    function_name, exc,
                    exc_info_on_loglevel=logging.DEBUG
                )
                ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
                    function_name, exc
                )
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            except TypeError as exc:
                # bad args: include the function docstring to help the caller
                msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(
                    function_name, exc, func.__doc__ or ''
                )
                log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
                ret['return'] = msg
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            except Exception:
                msg = 'The minion function caused an exception'
                log.warning(msg, exc_info_on_loglevel=True)
                salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
                ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
                ret['out'] = 'nested'
                ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
        else:
            # function not loaded: return docs / a missing-function message
            docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name))
            if docs:
                docs[function_name] = minion_instance.functions.missing_fun_string(function_name)
                ret['return'] = docs
            else:
                ret['return'] = minion_instance.functions.missing_fun_string(function_name)
                mod_name = function_name.split('.')[0]
                if mod_name in minion_instance.function_errors:
                    ret['return'] += ' Possible reasons: \'{0}\''.format(
                        minion_instance.function_errors[mod_name]
                    )
            ret['success'] = False
            ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
            ret['out'] = 'nested'

        ret['jid'] = data['jid']
        ret['fun'] = data['fun']
        ret['fun_args'] = data['arg']
        if 'master_id' in data:
            ret['master_id'] = data['master_id']
        if 'metadata' in data:
            if isinstance(data['metadata'], dict):
                ret['metadata'] = data['metadata']
            else:
                log.warning('The metadata parameter must be a dictionary. Ignoring.')
        if minion_instance.connected:
            minion_instance._return_pub(
                ret,
                timeout=minion_instance._return_retry_timer()
            )

        # Add default returners from minion config
        # Should have been coverted to comma-delimited string already
        if isinstance(opts.get('return'), six.string_types):
            if data['ret']:
                data['ret'] = ','.join((data['ret'], opts['return']))
            else:
                data['ret'] = opts['return']

        log.debug('minion return: %s', ret)
        # TODO: make a list? Seems odd to split it this late :/
        if data['ret'] and isinstance(data['ret'], six.string_types):
            if 'ret_config' in data:
                ret['ret_config'] = data['ret_config']
            if 'ret_kwargs' in data:
                ret['ret_kwargs'] = data['ret_kwargs']
            ret['id'] = opts['id']
            for returner in set(data['ret'].split(',')):
                try:
                    returner_str = '{0}.returner'.format(returner)
                    if returner_str in minion_instance.returners:
                        minion_instance.returners[returner_str](ret)
                    else:
                        returner_err = minion_instance.returners.missing_fun_string(returner_str)
                        log.error(
                            'Returner %s could not be loaded: %s',
                            returner_str, returner_err
                        )
                except Exception as exc:
                    log.exception(
                        'The return failed for job %s: %s', data['jid'], exc
                    )

    @classmethod
    def _thread_multi_return(cls, minion_instance, opts, data):
        '''
        This method should be used as a threading target, start the actual
        minion side execution.
        '''
        fn_ = os.path.join(minion_instance.proc_dir, data['jid'])

        if opts['multiprocessing'] and not salt.utils.platform.is_windows():
            # Shutdown the multiprocessing before daemonizing
            salt.log.setup.shutdown_multiprocessing_logging()

            salt.utils.process.daemonize_if(opts)

            # Reconfigure multiprocessing logging after daemonizing
            salt.log.setup.setup_multiprocessing_logging()

        salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))

        sdata = {'pid': os.getpid()}
        sdata.update(data)
        log.info('Starting a new job with PID %s', sdata['pid'])
        with salt.utils.files.fopen(fn_, 'w+b') as fp_:
            fp_.write(minion_instance.serial.dumps(sdata))

        multifunc_ordered = opts.get('multifunc_ordered', False)
        num_funcs = len(data['fun'])
        # ordered mode keys results by positional index; otherwise by fun name
        if multifunc_ordered:
            ret = {
                'return': [None] * num_funcs,
                'retcode': [None] * num_funcs,
                'success': [False] * num_funcs
            }
        else:
            ret = {
                'return': {},
                'retcode': {},
                'success': {}
            }

        for ind in range(0, num_funcs):
            if not multifunc_ordered:
                ret['success'][data['fun'][ind]] = False
            try:
                minion_blackout_violation = False
                if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
                    whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
                    # this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
                    if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
                        minion_blackout_violation = True
                elif minion_instance.opts['grains'].get('minion_blackout', False):
                    whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
                    if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
                        minion_blackout_violation = True
                if minion_blackout_violation:
                    raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
                                              'to False in pillar or grains to resume operations. Only '
                                              'saltutil.refresh_pillar allowed in blackout mode.')

                func = minion_instance.functions[data['fun'][ind]]

                args, kwargs = load_args_and_kwargs(
                    func,
                    data['arg'][ind],
                    data)
                minion_instance.functions.pack['__context__']['retcode'] = 0
                key = ind if multifunc_ordered else data['fun'][ind]
                ret['return'][key] = func(*args, **kwargs)
                retcode = minion_instance.functions.pack['__context__'].get(
                    'retcode',
                    0
                )
                if retcode == 0:
                    # No nonzero retcode in __context__ dunder. Check if return
                    # is a dictionary with a "result" or "success" key.
                    try:
                        func_result = all(ret['return'][key].get(x, True)
                                          for x in ('result', 'success'))
                    except Exception:
                        # return data is not a dict
                        func_result = True
                    if not func_result:
                        retcode = 1

                ret['retcode'][key] = retcode
                ret['success'][key] = retcode == 0
            except Exception as exc:
                trb = traceback.format_exc()
                log.warning('The minion function caused an exception: %s', exc)
                if multifunc_ordered:
                    ret['return'][ind] = trb
                else:
                    ret['return'][data['fun'][ind]] = trb
        ret['jid'] = data['jid']
        ret['fun'] = data['fun']
        ret['fun_args'] = data['arg']
        if 'metadata' in data:
            ret['metadata'] = data['metadata']
        if minion_instance.connected:
            minion_instance._return_pub(
                ret,
                timeout=minion_instance._return_retry_timer()
            )
        if data['ret']:
            if 'ret_config' in data:
                ret['ret_config'] = data['ret_config']
            if 'ret_kwargs' in data:
                ret['ret_kwargs'] = data['ret_kwargs']
            for returner in set(data['ret'].split(',')):
                ret['id'] = opts['id']
                try:
                    minion_instance.returners['{0}.returner'.format(
                        returner
                    )](ret)
                except Exception as exc:
                    log.error(
                        'The return failed for job %s: %s',
                        data['jid'], exc
                    )

    def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True):
        '''
        Return the data from the executed command to the master server
        '''
        jid = ret.get('jid', ret.get('__jid__'))
        fun = ret.get('fun', ret.get('__fun__'))
        if self.opts['multiprocessing']:
            # remove the proc-dir marker file for this jid, if present
            fn_ = os.path.join(self.proc_dir, jid)
            if os.path.isfile(fn_):
                try:
                    os.remove(fn_)
                except (OSError, IOError):
                    # The file is gone already
                    pass
        log.info('Returning information for job: %s', jid)
        log.trace('Return data: %s', ret)
        if ret_cmd == '_syndic_return':
            # NOTE(review): this uses opts['uid'] for the 'id' field while
            # _return_pub_multi uses opts['id'] for the same case -- looks
            # inconsistent; confirm which is intended before changing.
            load = {'cmd': ret_cmd,
                    'id': self.opts['uid'],
                    'jid': jid,
                    'fun': fun,
                    'arg': ret.get('arg'),
                    'tgt': ret.get('tgt'),
                    'tgt_type': ret.get('tgt_type'),
                    'load': ret.get('__load__')}
            if '__master_id__' in ret:
                load['master_id'] = ret['__master_id__']
            load['return'] = {}
            for key, value in six.iteritems(ret):
                # keys starting with '__' are internal bookkeeping, not results
                if key.startswith('__'):
                    continue
                load['return'][key] = value
        else:
            load = {'cmd': ret_cmd,
                    'id': self.opts['id']}
            for key, value in six.iteritems(ret):
                load[key] = value

        if 'out' in ret:
            if isinstance(ret['out'], six.string_types):
                load['out'] = ret['out']
            else:
                log.error(
                    'Invalid outputter %s. This is likely a bug.',
                    ret['out']
                )
        else:
            # fall back to the executed function's declared outputter
            try:
                oput = self.functions[fun].__outputter__
            except (KeyError, AttributeError, TypeError):
                pass
            else:
                if isinstance(oput, six.string_types):
                    load['out'] = oput
        if self.opts['cache_jobs']:
            # Local job cache has been enabled
            if ret['jid'] == 'req':
                ret['jid'] = salt.utils.jid.gen_jid(self.opts)
            salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret)

        if not self.opts['pub_ret']:
            return ''

        def timeout_handler(*_):
            log.warning(
                'The minion failed to return the job information for job %s. '
                'This is often due to the master being shut down or '
                'overloaded. If the master is running, consider increasing '
                'the worker_threads value.', jid
            )
            return True

        if sync:
            try:
                ret_val = self._send_req_sync(load, timeout=timeout)
            except SaltReqTimeoutError:
                timeout_handler()
                return ''
        else:
            with tornado.stack_context.ExceptionStackContext(timeout_handler):
                # pylint: disable=unexpected-keyword-arg
                ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None)

        log.trace('ret_val = %s', ret_val)  # pylint: disable=no-member
        return ret_val

    def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True):
        '''
        Return the data from the executed command to the master server
        '''
        if not isinstance(rets, list):
            rets = [rets]
        jids = {}
        for ret in rets:
            jid = ret.get('jid', ret.get('__jid__'))
            fun = ret.get('fun', ret.get('__fun__'))
            if self.opts['multiprocessing']:
                fn_ = os.path.join(self.proc_dir, jid)
                if os.path.isfile(fn_):
                    try:
                        os.remove(fn_)
                    except (OSError, IOError):
                        # The file is gone already
                        pass
            log.info('Returning information for job: %s', jid)
            # aggregate multiple returns for the same jid into one load
            load = jids.setdefault(jid, {})
            if ret_cmd == '_syndic_return':
                if not load:
                    load.update({'id': self.opts['id'],
                                 'jid': jid,
                                 'fun': fun,
                                 'arg': ret.get('arg'),
                                 'tgt': ret.get('tgt'),
                                 'tgt_type': ret.get('tgt_type'),
                                 'load': ret.get('__load__'),
                                 'return': {}})
                if '__master_id__' in ret:
                    load['master_id'] = ret['__master_id__']
                for key, value in six.iteritems(ret):
                    if key.startswith('__'):
                        continue
                    load['return'][key] = value
            else:
                load.update({'id': self.opts['id']})
                for key, value in six.iteritems(ret):
                    load[key] = value
            if 'out' in ret:
                if isinstance(ret['out'], six.string_types):
                    load['out'] = ret['out']
                else:
                    log.error(
                        'Invalid outputter %s. This is likely a bug.',
                        ret['out']
                    )
            else:
                try:
                    oput = self.functions[fun].__outputter__
                except (KeyError, AttributeError, TypeError):
                    pass
                else:
                    if isinstance(oput, six.string_types):
                        load['out'] = oput
            if self.opts['cache_jobs']:
                # Local job cache has been enabled
                salt.utils.minion.cache_jobs(self.opts, load['jid'], ret)

        load = {'cmd': ret_cmd,
                'load': list(six.itervalues(jids))}

        def timeout_handler(*_):
            log.warning(
                'The minion failed to return the job information for job %s. '
                'This is often due to the master being shut down or '
                'overloaded. If the master is running, consider increasing '
                'the worker_threads value.', jid
            )
            return True

        if sync:
            try:
                ret_val = self._send_req_sync(load, timeout=timeout)
            except SaltReqTimeoutError:
                timeout_handler()
                return ''
        else:
            with tornado.stack_context.ExceptionStackContext(timeout_handler):
                # pylint: disable=unexpected-keyword-arg
                ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None)

        log.trace('ret_val = %s', ret_val)  # pylint: disable=no-member
        return ret_val

    def _state_run(self):
        '''
        Execute a state run based on information set in the minion config file
        '''
        if self.opts['startup_states']:
            if self.opts.get('master_type', 'str') == 'disable' and \
                    self.opts.get('file_client', 'remote') == 'remote':
                log.warning(
                    'Cannot run startup_states when \'master_type\' is set '
                    'to \'disable\' and \'file_client\' is set to '
                    '\'remote\'. Skipping.'
                )
            else:
                data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
                if self.opts['startup_states'] == 'sls':
                    data['fun'] = 'state.sls'
                    data['arg'] = [self.opts['sls_list']]
                elif self.opts['startup_states'] == 'top':
                    data['fun'] = 'state.top'
                    data['arg'] = [self.opts['top_file']]
                else:
                    data['fun'] = 'state.highstate'
                    data['arg'] = []
                self._handle_decoded_payload(data)

    def _refresh_grains_watcher(self, refresh_interval_in_minutes):
        '''
        Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion
        :param refresh_interval_in_minutes:
        :return: None
        '''
        if '__update_grains' not in self.opts.get('schedule', {}):
            if 'schedule' not in self.opts:
                self.opts['schedule'] = {}
            self.opts['schedule'].update({
                '__update_grains': {
                    'function': 'event.fire',
                    'args': [{}, 'grains_refresh'],
                    'minutes': refresh_interval_in_minutes
                }
            })

    def _fire_master_minion_start(self):
        # Send an event to the master that the minion is live
        if self.opts['enable_legacy_startup_events']:
            # Old style event. Defaults to False in Sodium release.
            self._fire_master(
                'Minion {0} started at {1}'.format(
                    self.opts['id'],
                    time.asctime()
                ),
                'minion_start'
            )
        # send name spaced event
        self._fire_master(
            'Minion {0} started at {1}'.format(
                self.opts['id'],
                time.asctime()
            ),
            tagify([self.opts['id'], 'start'], 'minion'),
        )

    def module_refresh(self, force_refresh=False, notify=False):
        '''
        Refresh the functions and returners.
        '''
        log.debug('Refreshing modules. Notify=%s', notify)
        self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify)
        # keep the scheduler pointed at the freshly loaded functions
        self.schedule.functions = self.functions
        self.schedule.returners = self.returners

    def beacons_refresh(self):
        '''
        Refresh the functions and returners.
        '''
        log.debug('Refreshing beacons.')
        self.beacons = salt.beacons.Beacon(self.opts, self.functions)

    def matchers_refresh(self):
        '''
        Refresh the matchers
        '''
        log.debug('Refreshing matchers.')
        self.matchers = salt.loader.matchers(self.opts)

    # TODO: only allow one future in flight at a time?
    @tornado.gen.coroutine
    def pillar_refresh(self, force_refresh=False, notify=False):
        '''
        Refresh the pillar
        '''
        if self.connected:
            log.debug('Refreshing pillar. Notify: %s', notify)
            async_pillar = salt.pillar.get_async_pillar(
                self.opts,
                self.opts['grains'],
                self.opts['id'],
                self.opts['saltenv'],
                pillarenv=self.opts.get('pillarenv'),
            )
            try:
                self.opts['pillar'] = yield async_pillar.compile_pillar()
                if notify:
                    evt = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
                    evt.fire_event({'complete': True},
                                   tag=salt.defaults.events.MINION_PILLAR_COMPLETE)
            except SaltClientError:
                # Do not exit if a pillar refresh fails.
                log.error('Pillar data could not be refreshed. '
                          'One or more masters may be down!')
            finally:
                async_pillar.destroy()
        # modules/matchers/beacons all consume pillar data, so refresh them too
        self.module_refresh(force_refresh, notify)
        self.matchers_refresh()
        self.beacons_refresh()

    def manage_schedule(self, tag, data):
        '''
        Refresh the functions and returners.
        '''
        func = data.get('func', None)
        name = data.get('name', None)
        schedule = data.get('schedule', None)
        where = data.get('where', None)
        persist = data.get('persist', None)

        # dispatch table: event 'func' -> (Schedule method name, args)
        funcs = {'delete': ('delete_job', (name, persist)),
                 'add': ('add_job', (schedule, persist)),
                 'modify': ('modify_job', (name, schedule, persist)),
                 'enable': ('enable_schedule', ()),
                 'disable': ('disable_schedule', ()),
                 'enable_job': ('enable_job', (name, persist)),
                 'disable_job': ('disable_job', (name, persist)),
                 'postpone_job': ('postpone_job', (name, data)),
                 'skip_job': ('skip_job', (name, data)),
                 'reload': ('reload', (schedule,)),
                 'list': ('list', (where,)),
                 'save_schedule': ('save_schedule', ()),
                 'get_next_fire_time': ('get_next_fire_time', (name,))}

        # Call the appropriate schedule function
        try:
            # funcs.get(func) is None for unknown funcs; unpacking then raises
            # TypeError, which is handled below
            alias, params = funcs.get(func)
            getattr(self.schedule, alias)(*params)
        except TypeError:
            log.error('Function "%s" is unavailable in salt.utils.scheduler',
                      func)

    def manage_beacons(self, tag, data):
        '''
        Manage Beacons
        '''
        func = data.get('func', None)
        name = data.get('name', None)
        beacon_data = data.get('beacon_data', None)
        include_pillar = data.get('include_pillar', None)
        include_opts = data.get('include_opts', None)

        # dispatch table: event 'func' -> (Beacon method name, args)
        funcs = {'add': ('add_beacon', (name, beacon_data)),
                 'modify': ('modify_beacon', (name, beacon_data)),
                 'delete': ('delete_beacon', (name,)),
                 'enable': ('enable_beacons', ()),
                 'disable': ('disable_beacons', ()),
                 'enable_beacon': ('enable_beacon', (name,)),
                 'disable_beacon': ('disable_beacon', (name,)),
                 'list': ('list_beacons', (include_opts, include_pillar)),
                 'list_available': ('list_available_beacons', ()),
                 'validate_beacon': ('validate_beacon', (name, beacon_data)),
                 'reset': ('reset', ())}

        # Call the appropriate beacon function
        try:
            alias, params = funcs.get(func)
            getattr(self.beacons, alias)(*params)
        except AttributeError:
            log.error('Function "%s" is unavailable in salt.beacons', func)
        except TypeError as exc:
            log.info(
                'Failed to handle %s with data(%s). Error: %s',
                tag, data, exc,
                exc_info_on_loglevel=logging.DEBUG
            )

    def environ_setenv(self, tag, data):
        '''
        Set the salt-minion main process environment according to
        the data contained in the minion event data
        '''
        environ = data.get('environ', None)
        if environ is None:
            return False
        false_unsets = data.get('false_unsets', False)
        clear_all = data.get('clear_all', False)
        # local import to avoid a module-load cycle at file import time
        import salt.modules.environ as mod_environ
        return mod_environ.setenv(environ, false_unsets, clear_all)

    def _pre_tune(self):
        '''
        Set the minion running flag and issue the appropriate warnings if
        the minion cannot be started or is already running
        '''
        if self._running is None:
            self._running = True
        elif self._running is False:
            log.error(
                'This %s was scheduled to stop. Not running %s.tune_in()',
                self.__class__.__name__, self.__class__.__name__
            )
            return
        elif self._running is True:
            log.error(
                'This %s is already running. Not running %s.tune_in()',
                self.__class__.__name__, self.__class__.__name__
            )
            return

        try:
            log.info(
                '%s is starting as user \'%s\'',
                self.__class__.__name__, salt.utils.user.get_user()
            )
        except Exception as err:
            # Only windows is allowed to fail here. See #3189. Log as debug in
            # that case. Else, error.
            log.log(
                # degrade to DEBUG on Windows, where this lookup may fail (#3189)
                salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR,
                'Failed to get the user who is starting %s',
                self.__class__.__name__,
                exc_info=err
            )

    def _mine_send(self, tag, data):
        '''
        Send mine data to the master
        '''
        channel = salt.transport.client.ReqChannel.factory(self.opts)
        data['tok'] = self.tok
        try:
            ret = channel.send(data)
            return ret
        except SaltReqTimeoutError:
            log.warning('Unable to send mine data to master.')
            return None
        finally:
            # always release the request channel
            channel.close()

    def _handle_tag_module_refresh(self, tag, data):
        '''
        Handle a module_refresh event
        '''
        self.module_refresh(
            force_refresh=data.get('force_refresh', False),
            notify=data.get('notify', False)
        )

    @tornado.gen.coroutine
    def _handle_tag_pillar_refresh(self, tag, data):
        '''
        Handle a pillar_refresh event
        '''
        yield self.pillar_refresh(
            force_refresh=data.get('force_refresh', False),
            notify=data.get('notify', False)
        )

    def _handle_tag_beacons_refresh(self, tag, data):
        '''
        Handle a beacon_refresh event
        '''
        self.beacons_refresh()

    def _handle_tag_matchers_refresh(self, tag, data):
        '''
        Handle a matchers_refresh event
        '''
        self.matchers_refresh()

    def _handle_tag_manage_schedule(self, tag, data):
        '''
        Handle a manage_schedule event
        '''
        self.manage_schedule(tag, data)

    def _handle_tag_manage_beacons(self, tag, data):
        '''
        Handle a manage_beacons event
        '''
        self.manage_beacons(tag, data)

    def _handle_tag_grains_refresh(self, tag, data):
        '''
        Handle a grains_refresh event
        '''
        # only refresh when forced or when grains actually changed
        if (data.get('force_refresh', False) or
                self.grains_cache != self.opts['grains']):
            self.pillar_refresh(force_refresh=True)
            self.grains_cache = self.opts['grains']

    def _handle_tag_environ_setenv(self, tag, data):
        '''
        Handle a environ_setenv event
        '''
        self.environ_setenv(tag, data)

    def _handle_tag_minion_mine(self, tag, data):
        '''
        Handle a _minion_mine event
        '''
        self._mine_send(tag, data)

    def _handle_tag_fire_master(self, tag, data):
        '''
        Handle a fire_master event
        '''
        if self.connected:
            log.debug('Forwarding master event tag=%s', data['tag'])
            self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])

    def _handle_tag_master_disconnected_failback(self, tag, data):
        '''
        Handle a master_disconnected_failback event
        '''
        # if the master disconnect event is for a different master, raise an exception
        if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']:
            # not mine master, ignore
            return
        if tag.startswith(master_event(type='failback')):
            # if the master failback event is not for the top master, raise an exception
            if data['master'] != self.opts['master_list'][0]:
                raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format(
                    data['master'], self.opts['master']))
            # if the master failback event is for the current master, raise an exception
            # NOTE(review): self.opts['master'][0] indexes the master value --
            # if opts['master'] is a string this compares against its first
            # character; confirm whether opts['master'] can be a list here.
            elif data['master'] == self.opts['master'][0]:
                raise SaltException('Already connected to \'{0}\''.format(data['master']))

        if self.connected:
            # we are not connected anymore
            self.connected = False
            log.info('Connection to master %s lost', self.opts['master'])

            # we can't use the config default here because the default '0' value is overloaded
            # to mean 'if 0 disable the job', but when salt detects a timeout it also sets up
            # these jobs
            master_alive_interval = self.opts['master_alive_interval'] or 60

            if self.opts['master_type'] != 'failover':
                # modify the scheduled job to fire on reconnect
                if self.opts['transport'] != 'tcp':
                    schedule = {
                        'function': 'status.master',
                        'seconds': master_alive_interval,
                        'jid_include': True,
                        'maxrunning': 1,
                        'return_job': False,
                        'kwargs': {'master': self.opts['master'],
                                   'connected': False}
                    }
                    self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
                                             schedule=schedule)
            else:
                # delete the scheduled job to don't interfere with the failover process
                if self.opts['transport'] != 'tcp':
                    self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']),
                                             persist=True)

                log.info('Trying to tune in to next master from master-list')

                if hasattr(self, 'pub_channel'):
                    # tear down the old pub channel before failing over
                    self.pub_channel.on_recv(None)
                    if hasattr(self.pub_channel, 'auth'):
                        self.pub_channel.auth.invalidate()
                    if hasattr(self.pub_channel, 'close'):
                        self.pub_channel.close()
                    del self.pub_channel

                # if eval_master finds a new master for us, self.connected
                # will be True again on successful master authentication
                try:
                    master, self.pub_channel = yield self.eval_master(
                        opts=self.opts,
                        failed=True,
                        failback=tag.startswith(master_event(type='failback')))
                except SaltClientError:
                    pass

                if self.connected:
                    self.opts['master'] = master

                    # re-init the subsystems to work with the new master
                    log.info(
                        'Re-initialising subsystems for new master %s',
                        self.opts['master']
                    )

                    # put the current schedule into the new loaders
                    self.opts['schedule'] = self.schedule.option('schedule')
                    self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
                    # make the schedule to use the new 'functions' loader
                    self.schedule.functions = self.functions
                    self.pub_channel.on_recv(self._handle_payload)
                    self._fire_master_minion_start()
                    log.info('Minion is ready to receive requests!')

                    # update scheduled job to run with the new master addr
                    if self.opts['transport'] != 'tcp':
                        schedule = {
                            'function': 'status.master',
                            'seconds': master_alive_interval,
                            'jid_include': True,
                            'maxrunning': 1,
                            'return_job': False,
                            'kwargs': {'master': self.opts['master'],
                                       'connected': True}
                        }
                        self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
                                                 schedule=schedule)

                        if self.opts['master_failback'] and 'master_list' in self.opts:
                            if self.opts['master'] != self.opts['master_list'][0]:
                                schedule = {
                                    'function': 'status.ping_master',
                                    'seconds': self.opts['master_failback_interval'],
                                    'jid_include': True,
                                    'maxrunning': 1,
                                    'return_job': False,
                                    'kwargs': {'master': self.opts['master_list'][0]}
                                }
                                self.schedule.modify_job(name=master_event(type='failback'),
                                                         schedule=schedule)
                            else:
                                self.schedule.delete_job(name=master_event(type='failback'), persist=True)
                else:
                    # no master could be reached; request a restart of the loop
                    self.restart = True
                    self.io_loop.stop()

    def _handle_tag_master_connected(self, tag, data):
        '''
        Handle a master_connected event
        '''
        # handle this event only once. otherwise it will pollute the log
        # also if master type is failover all the reconnection work is done
        # by `disconnected` event handler and this event must never happen,
        # anyway check it to be sure
        if not self.connected and self.opts['master_type'] != 'failover':
            log.info('Connection to master %s re-established', self.opts['master'])
            self.connected = True
            # modify the __master_alive job to only fire,
            # if the connection is lost again
            if self.opts['transport'] != 'tcp':
                if self.opts['master_alive_interval'] > 0:
                    schedule = {
                        'function': 'status.master',
                        'seconds': self.opts['master_alive_interval'],
                        'jid_include': True,
                        'maxrunning': 1,
                        'return_job': False,
                        'kwargs': {'master': self.opts['master'],
                                   'connected': True}
                    }
                    self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
                                             schedule=schedule)
                else:
                    self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']),
                                             persist=True)

    def _handle_tag_schedule_return(self, tag, data):
        '''
        Handle a _schedule_return event
        '''
        # reporting current connection with master
        if data['schedule'].startswith(master_event(type='alive', master='')):
            if data['return']:
                log.debug(
                    'Connected to master %s',
                    data['schedule'].split(master_event(type='alive', master=''))[1]
                )
        self._return_pub(data, ret_cmd='_return', sync=False)

    def _handle_tag_salt_error(self, tag, data):
        '''
        Handle a _salt_error event
        '''
        if self.connected:
            log.debug('Forwarding salt error event tag=%s', tag)
            self._fire_master(data, tag)

    def _handle_tag_salt_auth_creds(self, tag, data):
        '''
        Handle a salt_auth_creds event
        '''
        key = tuple(data['key'])
        log.debug(
            'Updating auth data for %s: %s -> %s',
            key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds']
        )
        salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds']

    @tornado.gen.coroutine
    def handle_event(self, package):
        '''
        Handle an event from the epull_sock (all local minion events)
        '''
        if not self.ready:
            raise tornado.gen.Return()
        tag, data = salt.utils.event.SaltEvent.unpack(package)
        log.debug(
            'Minion of \'%s\' is handling event tag \'%s\'',
            self.opts['master'], tag
        )

        # prefix-dispatch table: event tag prefix -> handler method
        tag_functions = {
            'beacons_refresh': self._handle_tag_beacons_refresh,
            'environ_setenv': self._handle_tag_environ_setenv,
            'fire_master': self._handle_tag_fire_master,
            'grains_refresh': self._handle_tag_grains_refresh,
            'matchers_refresh': self._handle_tag_matchers_refresh,
            'manage_schedule': self._handle_tag_manage_schedule,
            'manage_beacons': self._handle_tag_manage_beacons,
            '_minion_mine': self._handle_tag_minion_mine,
            'module_refresh': self._handle_tag_module_refresh,
            'pillar_refresh': self._handle_tag_pillar_refresh,
            'salt/auth/creds': self._handle_tag_salt_auth_creds,
            '_salt_error': self._handle_tag_salt_error,
            '__schedule_return': self._handle_tag_schedule_return,
            master_event(type='disconnected'): self._handle_tag_master_disconnected_failback,
            master_event(type='failback'): self._handle_tag_master_disconnected_failback,
            master_event(type='connected'): self._handle_tag_master_connected,
        }

        # Run the appropriate function
        # NOTE(review): coroutine handlers (e.g. pillar_refresh) are invoked
        # without being yielded here -- confirm the returned futures are
        # intentionally fire-and-forget.
        for tag_function in tag_functions:
            if tag.startswith(tag_function):
                tag_functions[tag_function](tag, data)

    def _fallback_cleanups(self):
        '''
        Fallback cleanup routines, attempting to fix leaked processes, threads, etc.
        '''
        # Add an extra fallback in case a forked process leaks through
        multiprocessing.active_children()

        # Cleanup Windows threads
        if not salt.utils.platform.is_windows():
            return
        for thread in self.win_proc:
            if not thread.is_alive():
                thread.join()
                try:
                    self.win_proc.remove(thread)
                    del thread
                except (ValueError, NameError):
                    pass

    def _setup_core(self):
        '''
        Set up the core minion attributes.
        This is safe to call multiple times.
        '''
        if not self.ready:
            # First call. Initialize.
self.functions, self.returners, self.function_errors, self.executors = self._load_modules() self.serial = salt.payload.Serial(self.opts) self.mod_opts = self._prep_mod_opts() # self.matcher = Matcher(self.opts, self.functions) self.matchers = salt.loader.matchers(self.opts) self.beacons = salt.beacons.Beacon(self.opts, self.functions) uid = salt.utils.user.get_uid(user=self.opts.get('user', None)) self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid) self.grains_cache = self.opts['grains'] self.ready = True def setup_beacons(self, before_connect=False): ''' Set up the beacons. This is safe to call multiple times. ''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'beacons' not in self.periodic_callbacks: self.beacons = salt.beacons.Beacon(self.opts, self.functions) def handle_beacons(): # Process Beacons beacons = None try: beacons = self.process_beacons(self.functions) except Exception: log.critical('The beacon errored: ', exc_info=True) if beacons and self.connected: self._fire_master(events=beacons) new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback( handle_beacons, loop_interval * 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_beacons() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) def setup_scheduler(self, before_connect=False): ''' Set up the scheduler. This is safe to call multiple times. 
''' self._setup_core() loop_interval = self.opts['loop_interval'] new_periodic_callbacks = {} if 'schedule' not in self.periodic_callbacks: if 'schedule' not in self.opts: self.opts['schedule'] = {} if not hasattr(self, 'schedule'): self.schedule = salt.utils.schedule.Schedule( self.opts, self.functions, self.returners, utils=self.utils, cleanup=[master_event(type='alive')]) try: if self.opts['grains_refresh_every']: # In minutes, not seconds! log.debug( 'Enabling the grains refresher. Will run every %d minute(s).', self.opts['grains_refresh_every'] ) self._refresh_grains_watcher(abs(self.opts['grains_refresh_every'])) except Exception as exc: log.error( 'Exception occurred in attempt to initialize grain refresh ' 'routine during minion tune-in: %s', exc ) # TODO: actually listen to the return and change period def handle_schedule(): self.process_schedule(self, loop_interval) new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000) if before_connect: # Make sure there is a chance for one iteration to occur before connect handle_schedule() if 'cleanup' not in self.periodic_callbacks: new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback( self._fallback_cleanups, loop_interval * 1000) # start all the other callbacks for periodic_cb in six.itervalues(new_periodic_callbacks): periodic_cb.start() self.periodic_callbacks.update(new_periodic_callbacks) # Main Minion Tune In def tune_in(self, start=True): ''' Lock onto the publisher. 
This is the main event loop for the minion :rtype : None ''' self._pre_tune() log.debug('Minion \'%s\' trying to tune in', self.opts['id']) if start: if self.opts.get('beacons_before_connect', False): self.setup_beacons(before_connect=True) if self.opts.get('scheduler_before_connect', False): self.setup_scheduler(before_connect=True) self.sync_connect_master() if self.connected: self._fire_master_minion_start() log.info('Minion is ready to receive requests!') # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() # Make sure to gracefully handle CTRL_LOGOFF_EVENT if HAS_WIN_FUNCTIONS: salt.utils.win_functions.enable_ctrl_logoff_handler() # On first startup execute a state run if configured to do so self._state_run() self.setup_beacons() self.setup_scheduler() # schedule the stuff that runs every interval ping_interval = self.opts.get('ping_interval', 0) * 60 if ping_interval > 0 and self.connected: def ping_master(): try: def ping_timeout_handler(*_): if self.opts.get('auth_safemode', False): log.error('** Master Ping failed. Attempting to restart minion**') delay = self.opts.get('random_reauth_delay', 5) log.info('delaying random_reauth_delay %ss', delay) try: self.functions['service.restart'](service_name()) except KeyError: # Probably no init system (running in docker?) log.warning( 'ping_interval reached without response ' 'from the master, but service.restart ' 'could not be run to restart the minion ' 'daemon. ping_interval requires that the ' 'minion is running under an init system.' 
) self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler) except Exception: log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG) self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000) self.periodic_callbacks['ping'].start() # add handler to subscriber if hasattr(self, 'pub_channel') and self.pub_channel is not None: self.pub_channel.on_recv(self._handle_payload) elif self.opts.get('master_type') != 'disable': log.error('No connection to master found. Scheduled jobs will not run.') if start: try: self.io_loop.start() if self.restart: self.destroy() except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown self.destroy() def _handle_payload(self, payload): if payload is not None and payload['enc'] == 'aes': if self._target_load(payload['load']): self._handle_decoded_payload(payload['load']) elif self.opts['zmq_filtering']: # In the filtering enabled case, we'd like to know when minion sees something it shouldnt log.trace( 'Broadcast message received not for this minion, Load: %s', payload['load'] ) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the minion currently has no need. def _target_load(self, load): # Verify that the publication is valid if 'tgt' not in load or 'jid' not in load or 'fun' not in load \ or 'arg' not in load: return False # Verify that the publication applies to this minion # It's important to note that the master does some pre-processing # to determine which minions to send a request to. So for example, # a "salt -G 'grain_key:grain_val' test.ping" will invoke some # pre-processing on the master and this minion should not see the # publication if the master does not determine that it should. 
if 'tgt_type' in load: match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None) if match_func is None: return False if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'): delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM) if not match_func(load['tgt'], delimiter=delimiter): return False elif not match_func(load['tgt']): return False else: if not self.matchers['glob_match.match'](load['tgt']): return False return True def __del__(self): self.destroy()
saltstack/salt
salt/minion.py
Syndic._handle_decoded_payload
python
def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # TODO: even do this?? data['to'] = int(data.get('to', self.opts['timeout'])) - 1 # Only forward the command if it didn't originate from ourselves if data.get('master_id', 0) != self.opts.get('master_id', 1): self.syndic_cmd(data)
Override this method if you wish to handle the decoded data differently.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2969-L2978
null
class Syndic(Minion): ''' Make a Syndic minion, this minion will use the minion keys on the master to authenticate with a higher level master. ''' def __init__(self, opts, **kwargs): self._syndic_interface = opts.get('interface') self._syndic = True # force auth_safemode True because Syndic don't support autorestart opts['auth_safemode'] = True opts['loop_interval'] = 1 super(Syndic, self).__init__(opts, **kwargs) self.mminion = salt.minion.MasterMinion(opts) self.jid_forward_cache = set() self.jids = {} self.raw_events = [] self.pub_future = None def syndic_cmd(self, data): ''' Take the now clear load and forward it on to the client cmd ''' # Set up default tgt_type if 'tgt_type' not in data: data['tgt_type'] = 'glob' kwargs = {} # optionally add a few fields to the publish data for field in ('master_id', # which master the job came from 'user', # which user ran the job ): if field in data: kwargs[field] = data[field] def timeout_handler(*args): log.warning('Unable to forward pub data: %s', args[1]) return True with tornado.stack_context.ExceptionStackContext(timeout_handler): self.local.pub_async(data['tgt'], data['fun'], data['arg'], data['tgt_type'], data['ret'], data['jid'], data['to'], io_loop=self.io_loop, callback=lambda _: None, **kwargs) def fire_master_syndic_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to false in Sodium release. 
self._fire_master( 'Syndic {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'syndic_start', sync=False, ) self._fire_master( 'Syndic {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'syndic'), sync=False, ) # TODO: clean up docs def tune_in_no_block(self): ''' Executes the tune_in sequence but omits extra logging and the management of the event bus assuming that these are handled outside the tune_in sequence ''' # Instantiate the local client self.local = salt.client.get_local_client( self.opts['_minion_conf_file'], io_loop=self.io_loop) # add handler to subscriber self.pub_channel.on_recv(self._process_cmd_socket) def _process_cmd_socket(self, payload): if payload is not None and payload['enc'] == 'aes': log.trace('Handling payload') self._handle_decoded_payload(payload['load']) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the syndic currently has no need. @tornado.gen.coroutine def reconnect(self): if hasattr(self, 'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication master, self.pub_channel = yield self.eval_master(opts=self.opts) if self.connected: self.opts['master'] = master self.pub_channel.on_recv(self._process_cmd_socket) log.info('Minion is ready to receive requests!') raise tornado.gen.Return(self) def destroy(self): ''' Tear down the syndic minion ''' # We borrowed the local clients poller so give it back before # it's destroyed. Reset the local poller reference. super(Syndic, self).destroy() if hasattr(self, 'local'): del self.local if hasattr(self, 'forward_events'): self.forward_events.stop()
saltstack/salt
salt/minion.py
Syndic.syndic_cmd
python
def syndic_cmd(self, data): ''' Take the now clear load and forward it on to the client cmd ''' # Set up default tgt_type if 'tgt_type' not in data: data['tgt_type'] = 'glob' kwargs = {} # optionally add a few fields to the publish data for field in ('master_id', # which master the job came from 'user', # which user ran the job ): if field in data: kwargs[field] = data[field] def timeout_handler(*args): log.warning('Unable to forward pub data: %s', args[1]) return True with tornado.stack_context.ExceptionStackContext(timeout_handler): self.local.pub_async(data['tgt'], data['fun'], data['arg'], data['tgt_type'], data['ret'], data['jid'], data['to'], io_loop=self.io_loop, callback=lambda _: None, **kwargs)
Take the now clear load and forward it on to the client cmd
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L2980-L3010
null
class Syndic(Minion): ''' Make a Syndic minion, this minion will use the minion keys on the master to authenticate with a higher level master. ''' def __init__(self, opts, **kwargs): self._syndic_interface = opts.get('interface') self._syndic = True # force auth_safemode True because Syndic don't support autorestart opts['auth_safemode'] = True opts['loop_interval'] = 1 super(Syndic, self).__init__(opts, **kwargs) self.mminion = salt.minion.MasterMinion(opts) self.jid_forward_cache = set() self.jids = {} self.raw_events = [] self.pub_future = None def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # TODO: even do this?? data['to'] = int(data.get('to', self.opts['timeout'])) - 1 # Only forward the command if it didn't originate from ourselves if data.get('master_id', 0) != self.opts.get('master_id', 1): self.syndic_cmd(data) def fire_master_syndic_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to false in Sodium release. 
self._fire_master( 'Syndic {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'syndic_start', sync=False, ) self._fire_master( 'Syndic {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'syndic'), sync=False, ) # TODO: clean up docs def tune_in_no_block(self): ''' Executes the tune_in sequence but omits extra logging and the management of the event bus assuming that these are handled outside the tune_in sequence ''' # Instantiate the local client self.local = salt.client.get_local_client( self.opts['_minion_conf_file'], io_loop=self.io_loop) # add handler to subscriber self.pub_channel.on_recv(self._process_cmd_socket) def _process_cmd_socket(self, payload): if payload is not None and payload['enc'] == 'aes': log.trace('Handling payload') self._handle_decoded_payload(payload['load']) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the syndic currently has no need. @tornado.gen.coroutine def reconnect(self): if hasattr(self, 'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication master, self.pub_channel = yield self.eval_master(opts=self.opts) if self.connected: self.opts['master'] = master self.pub_channel.on_recv(self._process_cmd_socket) log.info('Minion is ready to receive requests!') raise tornado.gen.Return(self) def destroy(self): ''' Tear down the syndic minion ''' # We borrowed the local clients poller so give it back before # it's destroyed. Reset the local poller reference. super(Syndic, self).destroy() if hasattr(self, 'local'): del self.local if hasattr(self, 'forward_events'): self.forward_events.stop()
saltstack/salt
salt/minion.py
Syndic.tune_in_no_block
python
def tune_in_no_block(self): ''' Executes the tune_in sequence but omits extra logging and the management of the event bus assuming that these are handled outside the tune_in sequence ''' # Instantiate the local client self.local = salt.client.get_local_client( self.opts['_minion_conf_file'], io_loop=self.io_loop) # add handler to subscriber self.pub_channel.on_recv(self._process_cmd_socket)
Executes the tune_in sequence but omits extra logging and the management of the event bus assuming that these are handled outside the tune_in sequence
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L3034-L3045
[ "def get_local_client(\n c_path=os.path.join(syspaths.CONFIG_DIR, 'master'),\n mopts=None,\n skip_perm_errors=False,\n io_loop=None,\n auto_reconnect=False):\n '''\n .. versionadded:: 2014.7.0\n\n Read in the config and return the correct LocalClient object based on\n the configured transport\n\n :param IOLoop io_loop: io_loop used for events.\n Pass in an io_loop if you want asynchronous\n operation for obtaining events. Eg use of\n set_event_handler() API. Otherwise, operation\n will be synchronous.\n '''\n if mopts:\n opts = mopts\n else:\n # Late import to prevent circular import\n import salt.config\n opts = salt.config.client_config(c_path)\n\n # TODO: AIO core is separate from transport\n return LocalClient(\n mopts=opts,\n skip_perm_errors=skip_perm_errors,\n io_loop=io_loop,\n auto_reconnect=auto_reconnect)\n" ]
class Syndic(Minion): ''' Make a Syndic minion, this minion will use the minion keys on the master to authenticate with a higher level master. ''' def __init__(self, opts, **kwargs): self._syndic_interface = opts.get('interface') self._syndic = True # force auth_safemode True because Syndic don't support autorestart opts['auth_safemode'] = True opts['loop_interval'] = 1 super(Syndic, self).__init__(opts, **kwargs) self.mminion = salt.minion.MasterMinion(opts) self.jid_forward_cache = set() self.jids = {} self.raw_events = [] self.pub_future = None def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # TODO: even do this?? data['to'] = int(data.get('to', self.opts['timeout'])) - 1 # Only forward the command if it didn't originate from ourselves if data.get('master_id', 0) != self.opts.get('master_id', 1): self.syndic_cmd(data) def syndic_cmd(self, data): ''' Take the now clear load and forward it on to the client cmd ''' # Set up default tgt_type if 'tgt_type' not in data: data['tgt_type'] = 'glob' kwargs = {} # optionally add a few fields to the publish data for field in ('master_id', # which master the job came from 'user', # which user ran the job ): if field in data: kwargs[field] = data[field] def timeout_handler(*args): log.warning('Unable to forward pub data: %s', args[1]) return True with tornado.stack_context.ExceptionStackContext(timeout_handler): self.local.pub_async(data['tgt'], data['fun'], data['arg'], data['tgt_type'], data['ret'], data['jid'], data['to'], io_loop=self.io_loop, callback=lambda _: None, **kwargs) def fire_master_syndic_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to false in Sodium release. 
self._fire_master( 'Syndic {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'syndic_start', sync=False, ) self._fire_master( 'Syndic {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'syndic'), sync=False, ) # TODO: clean up docs def _process_cmd_socket(self, payload): if payload is not None and payload['enc'] == 'aes': log.trace('Handling payload') self._handle_decoded_payload(payload['load']) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the syndic currently has no need. @tornado.gen.coroutine def reconnect(self): if hasattr(self, 'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication master, self.pub_channel = yield self.eval_master(opts=self.opts) if self.connected: self.opts['master'] = master self.pub_channel.on_recv(self._process_cmd_socket) log.info('Minion is ready to receive requests!') raise tornado.gen.Return(self) def destroy(self): ''' Tear down the syndic minion ''' # We borrowed the local clients poller so give it back before # it's destroyed. Reset the local poller reference. super(Syndic, self).destroy() if hasattr(self, 'local'): del self.local if hasattr(self, 'forward_events'): self.forward_events.stop()
saltstack/salt
salt/minion.py
Syndic.destroy
python
def destroy(self): ''' Tear down the syndic minion ''' # We borrowed the local clients poller so give it back before # it's destroyed. Reset the local poller reference. super(Syndic, self).destroy() if hasattr(self, 'local'): del self.local if hasattr(self, 'forward_events'): self.forward_events.stop()
Tear down the syndic minion
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L3074-L3085
[ "def destroy(self):\n '''\n Tear down the minion\n '''\n if self._running is False:\n return\n\n self._running = False\n if hasattr(self, 'schedule'):\n del self.schedule\n if hasattr(self, 'pub_channel') and self.pub_channel is not None:\n self.pub_channel.on_recv(None)\n if hasattr(self.pub_channel, 'close'):\n self.pub_channel.close()\n del self.pub_channel\n if hasattr(self, 'periodic_callbacks'):\n for cb in six.itervalues(self.periodic_callbacks):\n cb.stop()\n" ]
class Syndic(Minion): ''' Make a Syndic minion, this minion will use the minion keys on the master to authenticate with a higher level master. ''' def __init__(self, opts, **kwargs): self._syndic_interface = opts.get('interface') self._syndic = True # force auth_safemode True because Syndic don't support autorestart opts['auth_safemode'] = True opts['loop_interval'] = 1 super(Syndic, self).__init__(opts, **kwargs) self.mminion = salt.minion.MasterMinion(opts) self.jid_forward_cache = set() self.jids = {} self.raw_events = [] self.pub_future = None def _handle_decoded_payload(self, data): ''' Override this method if you wish to handle the decoded data differently. ''' # TODO: even do this?? data['to'] = int(data.get('to', self.opts['timeout'])) - 1 # Only forward the command if it didn't originate from ourselves if data.get('master_id', 0) != self.opts.get('master_id', 1): self.syndic_cmd(data) def syndic_cmd(self, data): ''' Take the now clear load and forward it on to the client cmd ''' # Set up default tgt_type if 'tgt_type' not in data: data['tgt_type'] = 'glob' kwargs = {} # optionally add a few fields to the publish data for field in ('master_id', # which master the job came from 'user', # which user ran the job ): if field in data: kwargs[field] = data[field] def timeout_handler(*args): log.warning('Unable to forward pub data: %s', args[1]) return True with tornado.stack_context.ExceptionStackContext(timeout_handler): self.local.pub_async(data['tgt'], data['fun'], data['arg'], data['tgt_type'], data['ret'], data['jid'], data['to'], io_loop=self.io_loop, callback=lambda _: None, **kwargs) def fire_master_syndic_start(self): # Send an event to the master that the minion is live if self.opts['enable_legacy_startup_events']: # Old style event. Defaults to false in Sodium release. 
self._fire_master( 'Syndic {0} started at {1}'.format( self.opts['id'], time.asctime() ), 'syndic_start', sync=False, ) self._fire_master( 'Syndic {0} started at {1}'.format( self.opts['id'], time.asctime() ), tagify([self.opts['id'], 'start'], 'syndic'), sync=False, ) # TODO: clean up docs def tune_in_no_block(self): ''' Executes the tune_in sequence but omits extra logging and the management of the event bus assuming that these are handled outside the tune_in sequence ''' # Instantiate the local client self.local = salt.client.get_local_client( self.opts['_minion_conf_file'], io_loop=self.io_loop) # add handler to subscriber self.pub_channel.on_recv(self._process_cmd_socket) def _process_cmd_socket(self, payload): if payload is not None and payload['enc'] == 'aes': log.trace('Handling payload') self._handle_decoded_payload(payload['load']) # If it's not AES, and thus has not been verified, we do nothing. # In the future, we could add support for some clearfuncs, but # the syndic currently has no need. @tornado.gen.coroutine def reconnect(self): if hasattr(self, 'pub_channel'): self.pub_channel.on_recv(None) if hasattr(self.pub_channel, 'close'): self.pub_channel.close() del self.pub_channel # if eval_master finds a new master for us, self.connected # will be True again on successful master authentication master, self.pub_channel = yield self.eval_master(opts=self.opts) if self.connected: self.opts['master'] = master self.pub_channel.on_recv(self._process_cmd_socket) log.info('Minion is ready to receive requests!') raise tornado.gen.Return(self)
saltstack/salt
salt/minion.py
SyndicManager._spawn_syndics
python
def _spawn_syndics(self): ''' Spawn all the coroutines which will sign in the syndics ''' self._syndics = OrderedDict() # mapping of opts['master'] -> syndic masters = self.opts['master'] if not isinstance(masters, list): masters = [masters] for master in masters: s_opts = copy.copy(self.opts) s_opts['master'] = master self._syndics[master] = self._connect_syndic(s_opts)
Spawn all the coroutines which will sign in the syndics
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L3142-L3153
null
class SyndicManager(MinionBase): ''' Make a MultiMaster syndic minion, this minion will handle relaying jobs and returns from all minions connected to it to the list of masters it is connected to. Modes (controlled by `syndic_mode`: sync: This mode will synchronize all events and publishes from higher level masters cluster: This mode will only sync job publishes and returns Note: jobs will be returned best-effort to the requesting master. This also means (since we are using zmq) that if a job was fired and the master disconnects between the publish and return, that the return will end up in a zmq buffer in this Syndic headed to that original master. In addition, since these classes all seem to use a mix of blocking and non-blocking calls (with varying timeouts along the way) this daemon does not handle failure well, it will (under most circumstances) stall the daemon for ~15s trying to forward events to the down master ''' # time to connect to upstream master SYNDIC_CONNECT_TIMEOUT = 5 SYNDIC_EVENT_TIMEOUT = 5 def __init__(self, opts, io_loop=None): opts['loop_interval'] = 1 super(SyndicManager, self).__init__(opts) self.mminion = salt.minion.MasterMinion(opts) # sync (old behavior), cluster (only returns and publishes) self.syndic_mode = self.opts.get('syndic_mode', 'sync') self.syndic_failover = self.opts.get('syndic_failover', 'random') self.auth_wait = self.opts['acceptance_wait_time'] self.max_auth_wait = self.opts['acceptance_wait_time_max'] self._has_master = threading.Event() self.jid_forward_cache = set() if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # List of events self.raw_events = [] # Dict of rets: {master_id: {event_tag: job_ret, ...}, ...} self.job_rets = {} # List of delayed job_rets which was unable to send for some reason and will be resend to # any available master self.delayed = [] # Active pub futures: {master_id: (future, [job_ret, ...]), ...} self.pub_futures = {} 
@tornado.gen.coroutine def _connect_syndic(self, opts): ''' Create a syndic, and asynchronously connect it to a master ''' auth_wait = opts['acceptance_wait_time'] failed = False while True: if failed: if auth_wait < self.max_auth_wait: auth_wait += self.auth_wait log.debug( "sleeping before reconnect attempt to %s [%d/%d]", opts['master'], auth_wait, self.max_auth_wait, ) yield tornado.gen.sleep(auth_wait) # TODO: log? log.debug( 'Syndic attempting to connect to %s', opts['master'] ) try: syndic = Syndic(opts, timeout=self.SYNDIC_CONNECT_TIMEOUT, safe=False, io_loop=self.io_loop, ) yield syndic.connect_master(failed=failed) # set up the syndic to handle publishes (specifically not event forwarding) syndic.tune_in_no_block() # Send an event to the master that the minion is live syndic.fire_master_syndic_start() log.info( 'Syndic successfully connected to %s', opts['master'] ) break except SaltClientError as exc: failed = True log.error( 'Error while bringing up syndic for multi-syndic. Is the ' 'master at %s responding?', opts['master'] ) except (KeyboardInterrupt, SystemExit): raise except Exception: failed = True log.critical( 'Unexpected error while connecting to %s', opts['master'], exc_info=True ) raise tornado.gen.Return(syndic) def _mark_master_dead(self, master): ''' Mark a master as dead. This will start the sign-in routine ''' # if its connected, mark it dead if self._syndics[master].done(): syndic = self._syndics[master].result() # pylint: disable=no-member self._syndics[master] = syndic.reconnect() else: # TODO: debug? 
log.info( 'Attempting to mark %s as dead, although it is already ' 'marked dead', master ) def _call_syndic(self, func, args=(), kwargs=None, master_id=None): ''' Wrapper to call a given func on a syndic, best effort to get the one you asked for ''' if kwargs is None: kwargs = {} successful = False # Call for each master for master, syndic_future in self.iter_master_options(master_id): if not syndic_future.done() or syndic_future.exception(): log.error( 'Unable to call %s on %s, that syndic is not connected', func, master ) continue try: getattr(syndic_future.result(), func)(*args, **kwargs) successful = True except SaltClientError: log.error( 'Unable to call %s on %s, trying another...', func, master ) self._mark_master_dead(master) if not successful: log.critical('Unable to call %s on any masters!', func) def _return_pub_syndic(self, values, master_id=None): ''' Wrapper to call the '_return_pub_multi' a syndic, best effort to get the one you asked for ''' func = '_return_pub_multi' for master, syndic_future in self.iter_master_options(master_id): if not syndic_future.done() or syndic_future.exception(): log.error( 'Unable to call %s on %s, that syndic is not connected', func, master ) continue future, data = self.pub_futures.get(master, (None, None)) if future is not None: if not future.done(): if master == master_id: # Targeted master previous send not done yet, call again later return False else: # Fallback master is busy, try the next one continue elif future.exception(): # Previous execution on this master returned an error log.error( 'Unable to call %s on %s, trying another...', func, master ) self._mark_master_dead(master) del self.pub_futures[master] # Add not sent data to the delayed list and try the next master self.delayed.extend(data) continue future = getattr(syndic_future.result(), func)(values, '_syndic_return', timeout=self._return_retry_timer(), sync=False) self.pub_futures[master] = (future, values) return True # Loop done and didn't exit: wasn't 
sent, try again later return False def iter_master_options(self, master_id=None): ''' Iterate (in order) over your options for master ''' masters = list(self._syndics.keys()) if self.opts['syndic_failover'] == 'random': shuffle(masters) if master_id not in self._syndics: master_id = masters.pop(0) else: masters.remove(master_id) while True: yield master_id, self._syndics[master_id] if not masters: break master_id = masters.pop(0) def _reset_event_aggregation(self): self.job_rets = {} self.raw_events = [] def reconnect_event_bus(self, something): future = self.local.event.set_event_handler(self._process_event) self.io_loop.add_future(future, self.reconnect_event_bus) # Syndic Tune In def tune_in(self): ''' Lock onto the publisher. This is the main event loop for the syndic ''' self._spawn_syndics() # Instantiate the local client self.local = salt.client.get_local_client( self.opts['_minion_conf_file'], io_loop=self.io_loop) self.local.event.subscribe('') log.debug('SyndicManager \'%s\' trying to tune in', self.opts['id']) # register the event sub to the poller self.job_rets = {} self.raw_events = [] self._reset_event_aggregation() future = self.local.event.set_event_handler(self._process_event) self.io_loop.add_future(future, self.reconnect_event_bus) # forward events every syndic_event_forward_timeout self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events, self.opts['syndic_event_forward_timeout'] * 1000, ) self.forward_events.start() # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() self.io_loop.start() def _process_event(self, raw): # TODO: cleanup: Move down into event class mtag, data = self.local.event.unpack(raw, self.local.event.serial) log.trace('Got event %s', mtag) # pylint: disable=no-member tag_parts = mtag.split('/') if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \ salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \ 'return' in data: if 'jid' not in data: # Not a job return return if 
self.syndic_mode == 'cluster' and data.get('master_id', 0) == self.opts.get('master_id', 1): log.debug('Return received with matching master_id, not forwarding') return master = data.get('master_id') jdict = self.job_rets.setdefault(master, {}).setdefault(mtag, {}) if not jdict: jdict['__fun__'] = data.get('fun') jdict['__jid__'] = data['jid'] jdict['__load__'] = {} fstr = '{0}.get_load'.format(self.opts['master_job_cache']) # Only need to forward each load once. Don't hit the disk # for every minion return! if data['jid'] not in self.jid_forward_cache: jdict['__load__'].update( self.mminion.returners[fstr](data['jid']) ) self.jid_forward_cache.add(data['jid']) if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']: # Pop the oldest jid from the cache tmp = sorted(list(self.jid_forward_cache)) tmp.pop(0) self.jid_forward_cache = set(tmp) if master is not None: # __'s to make sure it doesn't print out on the master cli jdict['__master_id__'] = master ret = {} for key in 'return', 'retcode', 'success': if key in data: ret[key] = data[key] jdict[data['id']] = ret else: # TODO: config to forward these? 
If so we'll have to keep track of who # has seen them # if we are the top level masters-- don't forward all the minion events if self.syndic_mode == 'sync': # Add generic event aggregation here if 'retcode' not in data: self.raw_events.append({'data': data, 'tag': mtag}) def _forward_events(self): log.trace('Forwarding events') # pylint: disable=no-member if self.raw_events: events = self.raw_events self.raw_events = [] self._call_syndic('_fire_master', kwargs={'events': events, 'pretag': tagify(self.opts['id'], base='syndic'), 'timeout': self._return_retry_timer(), 'sync': False, }, ) if self.delayed: res = self._return_pub_syndic(self.delayed) if res: self.delayed = [] for master in list(six.iterkeys(self.job_rets)): values = list(six.itervalues(self.job_rets[master])) res = self._return_pub_syndic(values, master_id=master) if res: del self.job_rets[master]
saltstack/salt
salt/minion.py
SyndicManager._connect_syndic
python
def _connect_syndic(self, opts): ''' Create a syndic, and asynchronously connect it to a master ''' auth_wait = opts['acceptance_wait_time'] failed = False while True: if failed: if auth_wait < self.max_auth_wait: auth_wait += self.auth_wait log.debug( "sleeping before reconnect attempt to %s [%d/%d]", opts['master'], auth_wait, self.max_auth_wait, ) yield tornado.gen.sleep(auth_wait) # TODO: log? log.debug( 'Syndic attempting to connect to %s', opts['master'] ) try: syndic = Syndic(opts, timeout=self.SYNDIC_CONNECT_TIMEOUT, safe=False, io_loop=self.io_loop, ) yield syndic.connect_master(failed=failed) # set up the syndic to handle publishes (specifically not event forwarding) syndic.tune_in_no_block() # Send an event to the master that the minion is live syndic.fire_master_syndic_start() log.info( 'Syndic successfully connected to %s', opts['master'] ) break except SaltClientError as exc: failed = True log.error( 'Error while bringing up syndic for multi-syndic. Is the ' 'master at %s responding?', opts['master'] ) except (KeyboardInterrupt, SystemExit): raise except Exception: failed = True log.critical( 'Unexpected error while connecting to %s', opts['master'], exc_info=True ) raise tornado.gen.Return(syndic)
Create a syndic, and asynchronously connect it to a master
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L3156-L3210
null
class SyndicManager(MinionBase): ''' Make a MultiMaster syndic minion, this minion will handle relaying jobs and returns from all minions connected to it to the list of masters it is connected to. Modes (controlled by `syndic_mode`: sync: This mode will synchronize all events and publishes from higher level masters cluster: This mode will only sync job publishes and returns Note: jobs will be returned best-effort to the requesting master. This also means (since we are using zmq) that if a job was fired and the master disconnects between the publish and return, that the return will end up in a zmq buffer in this Syndic headed to that original master. In addition, since these classes all seem to use a mix of blocking and non-blocking calls (with varying timeouts along the way) this daemon does not handle failure well, it will (under most circumstances) stall the daemon for ~15s trying to forward events to the down master ''' # time to connect to upstream master SYNDIC_CONNECT_TIMEOUT = 5 SYNDIC_EVENT_TIMEOUT = 5 def __init__(self, opts, io_loop=None): opts['loop_interval'] = 1 super(SyndicManager, self).__init__(opts) self.mminion = salt.minion.MasterMinion(opts) # sync (old behavior), cluster (only returns and publishes) self.syndic_mode = self.opts.get('syndic_mode', 'sync') self.syndic_failover = self.opts.get('syndic_failover', 'random') self.auth_wait = self.opts['acceptance_wait_time'] self.max_auth_wait = self.opts['acceptance_wait_time_max'] self._has_master = threading.Event() self.jid_forward_cache = set() if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # List of events self.raw_events = [] # Dict of rets: {master_id: {event_tag: job_ret, ...}, ...} self.job_rets = {} # List of delayed job_rets which was unable to send for some reason and will be resend to # any available master self.delayed = [] # Active pub futures: {master_id: (future, [job_ret, ...]), ...} self.pub_futures = {} def 
_spawn_syndics(self): ''' Spawn all the coroutines which will sign in the syndics ''' self._syndics = OrderedDict() # mapping of opts['master'] -> syndic masters = self.opts['master'] if not isinstance(masters, list): masters = [masters] for master in masters: s_opts = copy.copy(self.opts) s_opts['master'] = master self._syndics[master] = self._connect_syndic(s_opts) @tornado.gen.coroutine def _mark_master_dead(self, master): ''' Mark a master as dead. This will start the sign-in routine ''' # if its connected, mark it dead if self._syndics[master].done(): syndic = self._syndics[master].result() # pylint: disable=no-member self._syndics[master] = syndic.reconnect() else: # TODO: debug? log.info( 'Attempting to mark %s as dead, although it is already ' 'marked dead', master ) def _call_syndic(self, func, args=(), kwargs=None, master_id=None): ''' Wrapper to call a given func on a syndic, best effort to get the one you asked for ''' if kwargs is None: kwargs = {} successful = False # Call for each master for master, syndic_future in self.iter_master_options(master_id): if not syndic_future.done() or syndic_future.exception(): log.error( 'Unable to call %s on %s, that syndic is not connected', func, master ) continue try: getattr(syndic_future.result(), func)(*args, **kwargs) successful = True except SaltClientError: log.error( 'Unable to call %s on %s, trying another...', func, master ) self._mark_master_dead(master) if not successful: log.critical('Unable to call %s on any masters!', func) def _return_pub_syndic(self, values, master_id=None): ''' Wrapper to call the '_return_pub_multi' a syndic, best effort to get the one you asked for ''' func = '_return_pub_multi' for master, syndic_future in self.iter_master_options(master_id): if not syndic_future.done() or syndic_future.exception(): log.error( 'Unable to call %s on %s, that syndic is not connected', func, master ) continue future, data = self.pub_futures.get(master, (None, None)) if future is not None: if not 
future.done(): if master == master_id: # Targeted master previous send not done yet, call again later return False else: # Fallback master is busy, try the next one continue elif future.exception(): # Previous execution on this master returned an error log.error( 'Unable to call %s on %s, trying another...', func, master ) self._mark_master_dead(master) del self.pub_futures[master] # Add not sent data to the delayed list and try the next master self.delayed.extend(data) continue future = getattr(syndic_future.result(), func)(values, '_syndic_return', timeout=self._return_retry_timer(), sync=False) self.pub_futures[master] = (future, values) return True # Loop done and didn't exit: wasn't sent, try again later return False def iter_master_options(self, master_id=None): ''' Iterate (in order) over your options for master ''' masters = list(self._syndics.keys()) if self.opts['syndic_failover'] == 'random': shuffle(masters) if master_id not in self._syndics: master_id = masters.pop(0) else: masters.remove(master_id) while True: yield master_id, self._syndics[master_id] if not masters: break master_id = masters.pop(0) def _reset_event_aggregation(self): self.job_rets = {} self.raw_events = [] def reconnect_event_bus(self, something): future = self.local.event.set_event_handler(self._process_event) self.io_loop.add_future(future, self.reconnect_event_bus) # Syndic Tune In def tune_in(self): ''' Lock onto the publisher. 
This is the main event loop for the syndic ''' self._spawn_syndics() # Instantiate the local client self.local = salt.client.get_local_client( self.opts['_minion_conf_file'], io_loop=self.io_loop) self.local.event.subscribe('') log.debug('SyndicManager \'%s\' trying to tune in', self.opts['id']) # register the event sub to the poller self.job_rets = {} self.raw_events = [] self._reset_event_aggregation() future = self.local.event.set_event_handler(self._process_event) self.io_loop.add_future(future, self.reconnect_event_bus) # forward events every syndic_event_forward_timeout self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events, self.opts['syndic_event_forward_timeout'] * 1000, ) self.forward_events.start() # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() self.io_loop.start() def _process_event(self, raw): # TODO: cleanup: Move down into event class mtag, data = self.local.event.unpack(raw, self.local.event.serial) log.trace('Got event %s', mtag) # pylint: disable=no-member tag_parts = mtag.split('/') if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \ salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \ 'return' in data: if 'jid' not in data: # Not a job return return if self.syndic_mode == 'cluster' and data.get('master_id', 0) == self.opts.get('master_id', 1): log.debug('Return received with matching master_id, not forwarding') return master = data.get('master_id') jdict = self.job_rets.setdefault(master, {}).setdefault(mtag, {}) if not jdict: jdict['__fun__'] = data.get('fun') jdict['__jid__'] = data['jid'] jdict['__load__'] = {} fstr = '{0}.get_load'.format(self.opts['master_job_cache']) # Only need to forward each load once. Don't hit the disk # for every minion return! 
if data['jid'] not in self.jid_forward_cache: jdict['__load__'].update( self.mminion.returners[fstr](data['jid']) ) self.jid_forward_cache.add(data['jid']) if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']: # Pop the oldest jid from the cache tmp = sorted(list(self.jid_forward_cache)) tmp.pop(0) self.jid_forward_cache = set(tmp) if master is not None: # __'s to make sure it doesn't print out on the master cli jdict['__master_id__'] = master ret = {} for key in 'return', 'retcode', 'success': if key in data: ret[key] = data[key] jdict[data['id']] = ret else: # TODO: config to forward these? If so we'll have to keep track of who # has seen them # if we are the top level masters-- don't forward all the minion events if self.syndic_mode == 'sync': # Add generic event aggregation here if 'retcode' not in data: self.raw_events.append({'data': data, 'tag': mtag}) def _forward_events(self): log.trace('Forwarding events') # pylint: disable=no-member if self.raw_events: events = self.raw_events self.raw_events = [] self._call_syndic('_fire_master', kwargs={'events': events, 'pretag': tagify(self.opts['id'], base='syndic'), 'timeout': self._return_retry_timer(), 'sync': False, }, ) if self.delayed: res = self._return_pub_syndic(self.delayed) if res: self.delayed = [] for master in list(six.iterkeys(self.job_rets)): values = list(six.itervalues(self.job_rets[master])) res = self._return_pub_syndic(values, master_id=master) if res: del self.job_rets[master]
saltstack/salt
salt/minion.py
SyndicManager._mark_master_dead
python
def _mark_master_dead(self, master): ''' Mark a master as dead. This will start the sign-in routine ''' # if its connected, mark it dead if self._syndics[master].done(): syndic = self._syndics[master].result() # pylint: disable=no-member self._syndics[master] = syndic.reconnect() else: # TODO: debug? log.info( 'Attempting to mark %s as dead, although it is already ' 'marked dead', master )
Mark a master as dead. This will start the sign-in routine
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L3212-L3225
null
class SyndicManager(MinionBase): ''' Make a MultiMaster syndic minion, this minion will handle relaying jobs and returns from all minions connected to it to the list of masters it is connected to. Modes (controlled by `syndic_mode`: sync: This mode will synchronize all events and publishes from higher level masters cluster: This mode will only sync job publishes and returns Note: jobs will be returned best-effort to the requesting master. This also means (since we are using zmq) that if a job was fired and the master disconnects between the publish and return, that the return will end up in a zmq buffer in this Syndic headed to that original master. In addition, since these classes all seem to use a mix of blocking and non-blocking calls (with varying timeouts along the way) this daemon does not handle failure well, it will (under most circumstances) stall the daemon for ~15s trying to forward events to the down master ''' # time to connect to upstream master SYNDIC_CONNECT_TIMEOUT = 5 SYNDIC_EVENT_TIMEOUT = 5 def __init__(self, opts, io_loop=None): opts['loop_interval'] = 1 super(SyndicManager, self).__init__(opts) self.mminion = salt.minion.MasterMinion(opts) # sync (old behavior), cluster (only returns and publishes) self.syndic_mode = self.opts.get('syndic_mode', 'sync') self.syndic_failover = self.opts.get('syndic_failover', 'random') self.auth_wait = self.opts['acceptance_wait_time'] self.max_auth_wait = self.opts['acceptance_wait_time_max'] self._has_master = threading.Event() self.jid_forward_cache = set() if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # List of events self.raw_events = [] # Dict of rets: {master_id: {event_tag: job_ret, ...}, ...} self.job_rets = {} # List of delayed job_rets which was unable to send for some reason and will be resend to # any available master self.delayed = [] # Active pub futures: {master_id: (future, [job_ret, ...]), ...} self.pub_futures = {} def 
_spawn_syndics(self): ''' Spawn all the coroutines which will sign in the syndics ''' self._syndics = OrderedDict() # mapping of opts['master'] -> syndic masters = self.opts['master'] if not isinstance(masters, list): masters = [masters] for master in masters: s_opts = copy.copy(self.opts) s_opts['master'] = master self._syndics[master] = self._connect_syndic(s_opts) @tornado.gen.coroutine def _connect_syndic(self, opts): ''' Create a syndic, and asynchronously connect it to a master ''' auth_wait = opts['acceptance_wait_time'] failed = False while True: if failed: if auth_wait < self.max_auth_wait: auth_wait += self.auth_wait log.debug( "sleeping before reconnect attempt to %s [%d/%d]", opts['master'], auth_wait, self.max_auth_wait, ) yield tornado.gen.sleep(auth_wait) # TODO: log? log.debug( 'Syndic attempting to connect to %s', opts['master'] ) try: syndic = Syndic(opts, timeout=self.SYNDIC_CONNECT_TIMEOUT, safe=False, io_loop=self.io_loop, ) yield syndic.connect_master(failed=failed) # set up the syndic to handle publishes (specifically not event forwarding) syndic.tune_in_no_block() # Send an event to the master that the minion is live syndic.fire_master_syndic_start() log.info( 'Syndic successfully connected to %s', opts['master'] ) break except SaltClientError as exc: failed = True log.error( 'Error while bringing up syndic for multi-syndic. 
Is the ' 'master at %s responding?', opts['master'] ) except (KeyboardInterrupt, SystemExit): raise except Exception: failed = True log.critical( 'Unexpected error while connecting to %s', opts['master'], exc_info=True ) raise tornado.gen.Return(syndic) def _call_syndic(self, func, args=(), kwargs=None, master_id=None): ''' Wrapper to call a given func on a syndic, best effort to get the one you asked for ''' if kwargs is None: kwargs = {} successful = False # Call for each master for master, syndic_future in self.iter_master_options(master_id): if not syndic_future.done() or syndic_future.exception(): log.error( 'Unable to call %s on %s, that syndic is not connected', func, master ) continue try: getattr(syndic_future.result(), func)(*args, **kwargs) successful = True except SaltClientError: log.error( 'Unable to call %s on %s, trying another...', func, master ) self._mark_master_dead(master) if not successful: log.critical('Unable to call %s on any masters!', func) def _return_pub_syndic(self, values, master_id=None): ''' Wrapper to call the '_return_pub_multi' a syndic, best effort to get the one you asked for ''' func = '_return_pub_multi' for master, syndic_future in self.iter_master_options(master_id): if not syndic_future.done() or syndic_future.exception(): log.error( 'Unable to call %s on %s, that syndic is not connected', func, master ) continue future, data = self.pub_futures.get(master, (None, None)) if future is not None: if not future.done(): if master == master_id: # Targeted master previous send not done yet, call again later return False else: # Fallback master is busy, try the next one continue elif future.exception(): # Previous execution on this master returned an error log.error( 'Unable to call %s on %s, trying another...', func, master ) self._mark_master_dead(master) del self.pub_futures[master] # Add not sent data to the delayed list and try the next master self.delayed.extend(data) continue future = getattr(syndic_future.result(), 
func)(values, '_syndic_return', timeout=self._return_retry_timer(), sync=False) self.pub_futures[master] = (future, values) return True # Loop done and didn't exit: wasn't sent, try again later return False def iter_master_options(self, master_id=None): ''' Iterate (in order) over your options for master ''' masters = list(self._syndics.keys()) if self.opts['syndic_failover'] == 'random': shuffle(masters) if master_id not in self._syndics: master_id = masters.pop(0) else: masters.remove(master_id) while True: yield master_id, self._syndics[master_id] if not masters: break master_id = masters.pop(0) def _reset_event_aggregation(self): self.job_rets = {} self.raw_events = [] def reconnect_event_bus(self, something): future = self.local.event.set_event_handler(self._process_event) self.io_loop.add_future(future, self.reconnect_event_bus) # Syndic Tune In def tune_in(self): ''' Lock onto the publisher. This is the main event loop for the syndic ''' self._spawn_syndics() # Instantiate the local client self.local = salt.client.get_local_client( self.opts['_minion_conf_file'], io_loop=self.io_loop) self.local.event.subscribe('') log.debug('SyndicManager \'%s\' trying to tune in', self.opts['id']) # register the event sub to the poller self.job_rets = {} self.raw_events = [] self._reset_event_aggregation() future = self.local.event.set_event_handler(self._process_event) self.io_loop.add_future(future, self.reconnect_event_bus) # forward events every syndic_event_forward_timeout self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events, self.opts['syndic_event_forward_timeout'] * 1000, ) self.forward_events.start() # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() self.io_loop.start() def _process_event(self, raw): # TODO: cleanup: Move down into event class mtag, data = self.local.event.unpack(raw, self.local.event.serial) log.trace('Got event %s', mtag) # pylint: disable=no-member tag_parts = mtag.split('/') if len(tag_parts) >= 4 and 
tag_parts[1] == 'job' and \ salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \ 'return' in data: if 'jid' not in data: # Not a job return return if self.syndic_mode == 'cluster' and data.get('master_id', 0) == self.opts.get('master_id', 1): log.debug('Return received with matching master_id, not forwarding') return master = data.get('master_id') jdict = self.job_rets.setdefault(master, {}).setdefault(mtag, {}) if not jdict: jdict['__fun__'] = data.get('fun') jdict['__jid__'] = data['jid'] jdict['__load__'] = {} fstr = '{0}.get_load'.format(self.opts['master_job_cache']) # Only need to forward each load once. Don't hit the disk # for every minion return! if data['jid'] not in self.jid_forward_cache: jdict['__load__'].update( self.mminion.returners[fstr](data['jid']) ) self.jid_forward_cache.add(data['jid']) if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']: # Pop the oldest jid from the cache tmp = sorted(list(self.jid_forward_cache)) tmp.pop(0) self.jid_forward_cache = set(tmp) if master is not None: # __'s to make sure it doesn't print out on the master cli jdict['__master_id__'] = master ret = {} for key in 'return', 'retcode', 'success': if key in data: ret[key] = data[key] jdict[data['id']] = ret else: # TODO: config to forward these? 
If so we'll have to keep track of who # has seen them # if we are the top level masters-- don't forward all the minion events if self.syndic_mode == 'sync': # Add generic event aggregation here if 'retcode' not in data: self.raw_events.append({'data': data, 'tag': mtag}) def _forward_events(self): log.trace('Forwarding events') # pylint: disable=no-member if self.raw_events: events = self.raw_events self.raw_events = [] self._call_syndic('_fire_master', kwargs={'events': events, 'pretag': tagify(self.opts['id'], base='syndic'), 'timeout': self._return_retry_timer(), 'sync': False, }, ) if self.delayed: res = self._return_pub_syndic(self.delayed) if res: self.delayed = [] for master in list(six.iterkeys(self.job_rets)): values = list(six.itervalues(self.job_rets[master])) res = self._return_pub_syndic(values, master_id=master) if res: del self.job_rets[master]
saltstack/salt
salt/minion.py
SyndicManager._call_syndic
python
def _call_syndic(self, func, args=(), kwargs=None, master_id=None): ''' Wrapper to call a given func on a syndic, best effort to get the one you asked for ''' if kwargs is None: kwargs = {} successful = False # Call for each master for master, syndic_future in self.iter_master_options(master_id): if not syndic_future.done() or syndic_future.exception(): log.error( 'Unable to call %s on %s, that syndic is not connected', func, master ) continue try: getattr(syndic_future.result(), func)(*args, **kwargs) successful = True except SaltClientError: log.error( 'Unable to call %s on %s, trying another...', func, master ) self._mark_master_dead(master) if not successful: log.critical('Unable to call %s on any masters!', func)
Wrapper to call a given func on a syndic, best effort to get the one you asked for
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L3227-L3253
null
class SyndicManager(MinionBase): ''' Make a MultiMaster syndic minion, this minion will handle relaying jobs and returns from all minions connected to it to the list of masters it is connected to. Modes (controlled by `syndic_mode`: sync: This mode will synchronize all events and publishes from higher level masters cluster: This mode will only sync job publishes and returns Note: jobs will be returned best-effort to the requesting master. This also means (since we are using zmq) that if a job was fired and the master disconnects between the publish and return, that the return will end up in a zmq buffer in this Syndic headed to that original master. In addition, since these classes all seem to use a mix of blocking and non-blocking calls (with varying timeouts along the way) this daemon does not handle failure well, it will (under most circumstances) stall the daemon for ~15s trying to forward events to the down master ''' # time to connect to upstream master SYNDIC_CONNECT_TIMEOUT = 5 SYNDIC_EVENT_TIMEOUT = 5 def __init__(self, opts, io_loop=None): opts['loop_interval'] = 1 super(SyndicManager, self).__init__(opts) self.mminion = salt.minion.MasterMinion(opts) # sync (old behavior), cluster (only returns and publishes) self.syndic_mode = self.opts.get('syndic_mode', 'sync') self.syndic_failover = self.opts.get('syndic_failover', 'random') self.auth_wait = self.opts['acceptance_wait_time'] self.max_auth_wait = self.opts['acceptance_wait_time_max'] self._has_master = threading.Event() self.jid_forward_cache = set() if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # List of events self.raw_events = [] # Dict of rets: {master_id: {event_tag: job_ret, ...}, ...} self.job_rets = {} # List of delayed job_rets which was unable to send for some reason and will be resend to # any available master self.delayed = [] # Active pub futures: {master_id: (future, [job_ret, ...]), ...} self.pub_futures = {} def 
_spawn_syndics(self): ''' Spawn all the coroutines which will sign in the syndics ''' self._syndics = OrderedDict() # mapping of opts['master'] -> syndic masters = self.opts['master'] if not isinstance(masters, list): masters = [masters] for master in masters: s_opts = copy.copy(self.opts) s_opts['master'] = master self._syndics[master] = self._connect_syndic(s_opts) @tornado.gen.coroutine def _connect_syndic(self, opts): ''' Create a syndic, and asynchronously connect it to a master ''' auth_wait = opts['acceptance_wait_time'] failed = False while True: if failed: if auth_wait < self.max_auth_wait: auth_wait += self.auth_wait log.debug( "sleeping before reconnect attempt to %s [%d/%d]", opts['master'], auth_wait, self.max_auth_wait, ) yield tornado.gen.sleep(auth_wait) # TODO: log? log.debug( 'Syndic attempting to connect to %s', opts['master'] ) try: syndic = Syndic(opts, timeout=self.SYNDIC_CONNECT_TIMEOUT, safe=False, io_loop=self.io_loop, ) yield syndic.connect_master(failed=failed) # set up the syndic to handle publishes (specifically not event forwarding) syndic.tune_in_no_block() # Send an event to the master that the minion is live syndic.fire_master_syndic_start() log.info( 'Syndic successfully connected to %s', opts['master'] ) break except SaltClientError as exc: failed = True log.error( 'Error while bringing up syndic for multi-syndic. Is the ' 'master at %s responding?', opts['master'] ) except (KeyboardInterrupt, SystemExit): raise except Exception: failed = True log.critical( 'Unexpected error while connecting to %s', opts['master'], exc_info=True ) raise tornado.gen.Return(syndic) def _mark_master_dead(self, master): ''' Mark a master as dead. This will start the sign-in routine ''' # if its connected, mark it dead if self._syndics[master].done(): syndic = self._syndics[master].result() # pylint: disable=no-member self._syndics[master] = syndic.reconnect() else: # TODO: debug? 
log.info( 'Attempting to mark %s as dead, although it is already ' 'marked dead', master ) def _return_pub_syndic(self, values, master_id=None): ''' Wrapper to call the '_return_pub_multi' a syndic, best effort to get the one you asked for ''' func = '_return_pub_multi' for master, syndic_future in self.iter_master_options(master_id): if not syndic_future.done() or syndic_future.exception(): log.error( 'Unable to call %s on %s, that syndic is not connected', func, master ) continue future, data = self.pub_futures.get(master, (None, None)) if future is not None: if not future.done(): if master == master_id: # Targeted master previous send not done yet, call again later return False else: # Fallback master is busy, try the next one continue elif future.exception(): # Previous execution on this master returned an error log.error( 'Unable to call %s on %s, trying another...', func, master ) self._mark_master_dead(master) del self.pub_futures[master] # Add not sent data to the delayed list and try the next master self.delayed.extend(data) continue future = getattr(syndic_future.result(), func)(values, '_syndic_return', timeout=self._return_retry_timer(), sync=False) self.pub_futures[master] = (future, values) return True # Loop done and didn't exit: wasn't sent, try again later return False def iter_master_options(self, master_id=None): ''' Iterate (in order) over your options for master ''' masters = list(self._syndics.keys()) if self.opts['syndic_failover'] == 'random': shuffle(masters) if master_id not in self._syndics: master_id = masters.pop(0) else: masters.remove(master_id) while True: yield master_id, self._syndics[master_id] if not masters: break master_id = masters.pop(0) def _reset_event_aggregation(self): self.job_rets = {} self.raw_events = [] def reconnect_event_bus(self, something): future = self.local.event.set_event_handler(self._process_event) self.io_loop.add_future(future, self.reconnect_event_bus) # Syndic Tune In def tune_in(self): ''' Lock onto 
the publisher. This is the main event loop for the syndic ''' self._spawn_syndics() # Instantiate the local client self.local = salt.client.get_local_client( self.opts['_minion_conf_file'], io_loop=self.io_loop) self.local.event.subscribe('') log.debug('SyndicManager \'%s\' trying to tune in', self.opts['id']) # register the event sub to the poller self.job_rets = {} self.raw_events = [] self._reset_event_aggregation() future = self.local.event.set_event_handler(self._process_event) self.io_loop.add_future(future, self.reconnect_event_bus) # forward events every syndic_event_forward_timeout self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events, self.opts['syndic_event_forward_timeout'] * 1000, ) self.forward_events.start() # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() self.io_loop.start() def _process_event(self, raw): # TODO: cleanup: Move down into event class mtag, data = self.local.event.unpack(raw, self.local.event.serial) log.trace('Got event %s', mtag) # pylint: disable=no-member tag_parts = mtag.split('/') if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \ salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \ 'return' in data: if 'jid' not in data: # Not a job return return if self.syndic_mode == 'cluster' and data.get('master_id', 0) == self.opts.get('master_id', 1): log.debug('Return received with matching master_id, not forwarding') return master = data.get('master_id') jdict = self.job_rets.setdefault(master, {}).setdefault(mtag, {}) if not jdict: jdict['__fun__'] = data.get('fun') jdict['__jid__'] = data['jid'] jdict['__load__'] = {} fstr = '{0}.get_load'.format(self.opts['master_job_cache']) # Only need to forward each load once. Don't hit the disk # for every minion return! 
if data['jid'] not in self.jid_forward_cache: jdict['__load__'].update( self.mminion.returners[fstr](data['jid']) ) self.jid_forward_cache.add(data['jid']) if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']: # Pop the oldest jid from the cache tmp = sorted(list(self.jid_forward_cache)) tmp.pop(0) self.jid_forward_cache = set(tmp) if master is not None: # __'s to make sure it doesn't print out on the master cli jdict['__master_id__'] = master ret = {} for key in 'return', 'retcode', 'success': if key in data: ret[key] = data[key] jdict[data['id']] = ret else: # TODO: config to forward these? If so we'll have to keep track of who # has seen them # if we are the top level masters-- don't forward all the minion events if self.syndic_mode == 'sync': # Add generic event aggregation here if 'retcode' not in data: self.raw_events.append({'data': data, 'tag': mtag}) def _forward_events(self): log.trace('Forwarding events') # pylint: disable=no-member if self.raw_events: events = self.raw_events self.raw_events = [] self._call_syndic('_fire_master', kwargs={'events': events, 'pretag': tagify(self.opts['id'], base='syndic'), 'timeout': self._return_retry_timer(), 'sync': False, }, ) if self.delayed: res = self._return_pub_syndic(self.delayed) if res: self.delayed = [] for master in list(six.iterkeys(self.job_rets)): values = list(six.itervalues(self.job_rets[master])) res = self._return_pub_syndic(values, master_id=master) if res: del self.job_rets[master]
saltstack/salt
salt/minion.py
SyndicManager._return_pub_syndic
python
def _return_pub_syndic(self, values, master_id=None): ''' Wrapper to call the '_return_pub_multi' a syndic, best effort to get the one you asked for ''' func = '_return_pub_multi' for master, syndic_future in self.iter_master_options(master_id): if not syndic_future.done() or syndic_future.exception(): log.error( 'Unable to call %s on %s, that syndic is not connected', func, master ) continue future, data = self.pub_futures.get(master, (None, None)) if future is not None: if not future.done(): if master == master_id: # Targeted master previous send not done yet, call again later return False else: # Fallback master is busy, try the next one continue elif future.exception(): # Previous execution on this master returned an error log.error( 'Unable to call %s on %s, trying another...', func, master ) self._mark_master_dead(master) del self.pub_futures[master] # Add not sent data to the delayed list and try the next master self.delayed.extend(data) continue future = getattr(syndic_future.result(), func)(values, '_syndic_return', timeout=self._return_retry_timer(), sync=False) self.pub_futures[master] = (future, values) return True # Loop done and didn't exit: wasn't sent, try again later return False
Wrapper to call the '_return_pub_multi' a syndic, best effort to get the one you asked for
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L3255-L3295
null
class SyndicManager(MinionBase): ''' Make a MultiMaster syndic minion, this minion will handle relaying jobs and returns from all minions connected to it to the list of masters it is connected to. Modes (controlled by `syndic_mode`: sync: This mode will synchronize all events and publishes from higher level masters cluster: This mode will only sync job publishes and returns Note: jobs will be returned best-effort to the requesting master. This also means (since we are using zmq) that if a job was fired and the master disconnects between the publish and return, that the return will end up in a zmq buffer in this Syndic headed to that original master. In addition, since these classes all seem to use a mix of blocking and non-blocking calls (with varying timeouts along the way) this daemon does not handle failure well, it will (under most circumstances) stall the daemon for ~15s trying to forward events to the down master ''' # time to connect to upstream master SYNDIC_CONNECT_TIMEOUT = 5 SYNDIC_EVENT_TIMEOUT = 5 def __init__(self, opts, io_loop=None): opts['loop_interval'] = 1 super(SyndicManager, self).__init__(opts) self.mminion = salt.minion.MasterMinion(opts) # sync (old behavior), cluster (only returns and publishes) self.syndic_mode = self.opts.get('syndic_mode', 'sync') self.syndic_failover = self.opts.get('syndic_failover', 'random') self.auth_wait = self.opts['acceptance_wait_time'] self.max_auth_wait = self.opts['acceptance_wait_time_max'] self._has_master = threading.Event() self.jid_forward_cache = set() if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # List of events self.raw_events = [] # Dict of rets: {master_id: {event_tag: job_ret, ...}, ...} self.job_rets = {} # List of delayed job_rets which was unable to send for some reason and will be resend to # any available master self.delayed = [] # Active pub futures: {master_id: (future, [job_ret, ...]), ...} self.pub_futures = {} def 
_spawn_syndics(self): ''' Spawn all the coroutines which will sign in the syndics ''' self._syndics = OrderedDict() # mapping of opts['master'] -> syndic masters = self.opts['master'] if not isinstance(masters, list): masters = [masters] for master in masters: s_opts = copy.copy(self.opts) s_opts['master'] = master self._syndics[master] = self._connect_syndic(s_opts) @tornado.gen.coroutine def _connect_syndic(self, opts): ''' Create a syndic, and asynchronously connect it to a master ''' auth_wait = opts['acceptance_wait_time'] failed = False while True: if failed: if auth_wait < self.max_auth_wait: auth_wait += self.auth_wait log.debug( "sleeping before reconnect attempt to %s [%d/%d]", opts['master'], auth_wait, self.max_auth_wait, ) yield tornado.gen.sleep(auth_wait) # TODO: log? log.debug( 'Syndic attempting to connect to %s', opts['master'] ) try: syndic = Syndic(opts, timeout=self.SYNDIC_CONNECT_TIMEOUT, safe=False, io_loop=self.io_loop, ) yield syndic.connect_master(failed=failed) # set up the syndic to handle publishes (specifically not event forwarding) syndic.tune_in_no_block() # Send an event to the master that the minion is live syndic.fire_master_syndic_start() log.info( 'Syndic successfully connected to %s', opts['master'] ) break except SaltClientError as exc: failed = True log.error( 'Error while bringing up syndic for multi-syndic. Is the ' 'master at %s responding?', opts['master'] ) except (KeyboardInterrupt, SystemExit): raise except Exception: failed = True log.critical( 'Unexpected error while connecting to %s', opts['master'], exc_info=True ) raise tornado.gen.Return(syndic) def _mark_master_dead(self, master): ''' Mark a master as dead. This will start the sign-in routine ''' # if its connected, mark it dead if self._syndics[master].done(): syndic = self._syndics[master].result() # pylint: disable=no-member self._syndics[master] = syndic.reconnect() else: # TODO: debug? 
log.info( 'Attempting to mark %s as dead, although it is already ' 'marked dead', master ) def _call_syndic(self, func, args=(), kwargs=None, master_id=None): ''' Wrapper to call a given func on a syndic, best effort to get the one you asked for ''' if kwargs is None: kwargs = {} successful = False # Call for each master for master, syndic_future in self.iter_master_options(master_id): if not syndic_future.done() or syndic_future.exception(): log.error( 'Unable to call %s on %s, that syndic is not connected', func, master ) continue try: getattr(syndic_future.result(), func)(*args, **kwargs) successful = True except SaltClientError: log.error( 'Unable to call %s on %s, trying another...', func, master ) self._mark_master_dead(master) if not successful: log.critical('Unable to call %s on any masters!', func) def iter_master_options(self, master_id=None): ''' Iterate (in order) over your options for master ''' masters = list(self._syndics.keys()) if self.opts['syndic_failover'] == 'random': shuffle(masters) if master_id not in self._syndics: master_id = masters.pop(0) else: masters.remove(master_id) while True: yield master_id, self._syndics[master_id] if not masters: break master_id = masters.pop(0) def _reset_event_aggregation(self): self.job_rets = {} self.raw_events = [] def reconnect_event_bus(self, something): future = self.local.event.set_event_handler(self._process_event) self.io_loop.add_future(future, self.reconnect_event_bus) # Syndic Tune In def tune_in(self): ''' Lock onto the publisher. 
This is the main event loop for the syndic ''' self._spawn_syndics() # Instantiate the local client self.local = salt.client.get_local_client( self.opts['_minion_conf_file'], io_loop=self.io_loop) self.local.event.subscribe('') log.debug('SyndicManager \'%s\' trying to tune in', self.opts['id']) # register the event sub to the poller self.job_rets = {} self.raw_events = [] self._reset_event_aggregation() future = self.local.event.set_event_handler(self._process_event) self.io_loop.add_future(future, self.reconnect_event_bus) # forward events every syndic_event_forward_timeout self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events, self.opts['syndic_event_forward_timeout'] * 1000, ) self.forward_events.start() # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() self.io_loop.start() def _process_event(self, raw): # TODO: cleanup: Move down into event class mtag, data = self.local.event.unpack(raw, self.local.event.serial) log.trace('Got event %s', mtag) # pylint: disable=no-member tag_parts = mtag.split('/') if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \ salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \ 'return' in data: if 'jid' not in data: # Not a job return return if self.syndic_mode == 'cluster' and data.get('master_id', 0) == self.opts.get('master_id', 1): log.debug('Return received with matching master_id, not forwarding') return master = data.get('master_id') jdict = self.job_rets.setdefault(master, {}).setdefault(mtag, {}) if not jdict: jdict['__fun__'] = data.get('fun') jdict['__jid__'] = data['jid'] jdict['__load__'] = {} fstr = '{0}.get_load'.format(self.opts['master_job_cache']) # Only need to forward each load once. Don't hit the disk # for every minion return! 
if data['jid'] not in self.jid_forward_cache: jdict['__load__'].update( self.mminion.returners[fstr](data['jid']) ) self.jid_forward_cache.add(data['jid']) if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']: # Pop the oldest jid from the cache tmp = sorted(list(self.jid_forward_cache)) tmp.pop(0) self.jid_forward_cache = set(tmp) if master is not None: # __'s to make sure it doesn't print out on the master cli jdict['__master_id__'] = master ret = {} for key in 'return', 'retcode', 'success': if key in data: ret[key] = data[key] jdict[data['id']] = ret else: # TODO: config to forward these? If so we'll have to keep track of who # has seen them # if we are the top level masters-- don't forward all the minion events if self.syndic_mode == 'sync': # Add generic event aggregation here if 'retcode' not in data: self.raw_events.append({'data': data, 'tag': mtag}) def _forward_events(self): log.trace('Forwarding events') # pylint: disable=no-member if self.raw_events: events = self.raw_events self.raw_events = [] self._call_syndic('_fire_master', kwargs={'events': events, 'pretag': tagify(self.opts['id'], base='syndic'), 'timeout': self._return_retry_timer(), 'sync': False, }, ) if self.delayed: res = self._return_pub_syndic(self.delayed) if res: self.delayed = [] for master in list(six.iterkeys(self.job_rets)): values = list(six.itervalues(self.job_rets[master])) res = self._return_pub_syndic(values, master_id=master) if res: del self.job_rets[master]
saltstack/salt
salt/minion.py
SyndicManager.iter_master_options
python
def iter_master_options(self, master_id=None): ''' Iterate (in order) over your options for master ''' masters = list(self._syndics.keys()) if self.opts['syndic_failover'] == 'random': shuffle(masters) if master_id not in self._syndics: master_id = masters.pop(0) else: masters.remove(master_id) while True: yield master_id, self._syndics[master_id] if not masters: break master_id = masters.pop(0)
Iterate (in order) over your options for master
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L3297-L3313
null
class SyndicManager(MinionBase): ''' Make a MultiMaster syndic minion, this minion will handle relaying jobs and returns from all minions connected to it to the list of masters it is connected to. Modes (controlled by `syndic_mode`: sync: This mode will synchronize all events and publishes from higher level masters cluster: This mode will only sync job publishes and returns Note: jobs will be returned best-effort to the requesting master. This also means (since we are using zmq) that if a job was fired and the master disconnects between the publish and return, that the return will end up in a zmq buffer in this Syndic headed to that original master. In addition, since these classes all seem to use a mix of blocking and non-blocking calls (with varying timeouts along the way) this daemon does not handle failure well, it will (under most circumstances) stall the daemon for ~15s trying to forward events to the down master ''' # time to connect to upstream master SYNDIC_CONNECT_TIMEOUT = 5 SYNDIC_EVENT_TIMEOUT = 5 def __init__(self, opts, io_loop=None): opts['loop_interval'] = 1 super(SyndicManager, self).__init__(opts) self.mminion = salt.minion.MasterMinion(opts) # sync (old behavior), cluster (only returns and publishes) self.syndic_mode = self.opts.get('syndic_mode', 'sync') self.syndic_failover = self.opts.get('syndic_failover', 'random') self.auth_wait = self.opts['acceptance_wait_time'] self.max_auth_wait = self.opts['acceptance_wait_time_max'] self._has_master = threading.Event() self.jid_forward_cache = set() if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # List of events self.raw_events = [] # Dict of rets: {master_id: {event_tag: job_ret, ...}, ...} self.job_rets = {} # List of delayed job_rets which was unable to send for some reason and will be resend to # any available master self.delayed = [] # Active pub futures: {master_id: (future, [job_ret, ...]), ...} self.pub_futures = {} def 
_spawn_syndics(self): ''' Spawn all the coroutines which will sign in the syndics ''' self._syndics = OrderedDict() # mapping of opts['master'] -> syndic masters = self.opts['master'] if not isinstance(masters, list): masters = [masters] for master in masters: s_opts = copy.copy(self.opts) s_opts['master'] = master self._syndics[master] = self._connect_syndic(s_opts) @tornado.gen.coroutine def _connect_syndic(self, opts): ''' Create a syndic, and asynchronously connect it to a master ''' auth_wait = opts['acceptance_wait_time'] failed = False while True: if failed: if auth_wait < self.max_auth_wait: auth_wait += self.auth_wait log.debug( "sleeping before reconnect attempt to %s [%d/%d]", opts['master'], auth_wait, self.max_auth_wait, ) yield tornado.gen.sleep(auth_wait) # TODO: log? log.debug( 'Syndic attempting to connect to %s', opts['master'] ) try: syndic = Syndic(opts, timeout=self.SYNDIC_CONNECT_TIMEOUT, safe=False, io_loop=self.io_loop, ) yield syndic.connect_master(failed=failed) # set up the syndic to handle publishes (specifically not event forwarding) syndic.tune_in_no_block() # Send an event to the master that the minion is live syndic.fire_master_syndic_start() log.info( 'Syndic successfully connected to %s', opts['master'] ) break except SaltClientError as exc: failed = True log.error( 'Error while bringing up syndic for multi-syndic. Is the ' 'master at %s responding?', opts['master'] ) except (KeyboardInterrupt, SystemExit): raise except Exception: failed = True log.critical( 'Unexpected error while connecting to %s', opts['master'], exc_info=True ) raise tornado.gen.Return(syndic) def _mark_master_dead(self, master): ''' Mark a master as dead. This will start the sign-in routine ''' # if its connected, mark it dead if self._syndics[master].done(): syndic = self._syndics[master].result() # pylint: disable=no-member self._syndics[master] = syndic.reconnect() else: # TODO: debug? 
log.info( 'Attempting to mark %s as dead, although it is already ' 'marked dead', master ) def _call_syndic(self, func, args=(), kwargs=None, master_id=None): ''' Wrapper to call a given func on a syndic, best effort to get the one you asked for ''' if kwargs is None: kwargs = {} successful = False # Call for each master for master, syndic_future in self.iter_master_options(master_id): if not syndic_future.done() or syndic_future.exception(): log.error( 'Unable to call %s on %s, that syndic is not connected', func, master ) continue try: getattr(syndic_future.result(), func)(*args, **kwargs) successful = True except SaltClientError: log.error( 'Unable to call %s on %s, trying another...', func, master ) self._mark_master_dead(master) if not successful: log.critical('Unable to call %s on any masters!', func) def _return_pub_syndic(self, values, master_id=None): ''' Wrapper to call the '_return_pub_multi' a syndic, best effort to get the one you asked for ''' func = '_return_pub_multi' for master, syndic_future in self.iter_master_options(master_id): if not syndic_future.done() or syndic_future.exception(): log.error( 'Unable to call %s on %s, that syndic is not connected', func, master ) continue future, data = self.pub_futures.get(master, (None, None)) if future is not None: if not future.done(): if master == master_id: # Targeted master previous send not done yet, call again later return False else: # Fallback master is busy, try the next one continue elif future.exception(): # Previous execution on this master returned an error log.error( 'Unable to call %s on %s, trying another...', func, master ) self._mark_master_dead(master) del self.pub_futures[master] # Add not sent data to the delayed list and try the next master self.delayed.extend(data) continue future = getattr(syndic_future.result(), func)(values, '_syndic_return', timeout=self._return_retry_timer(), sync=False) self.pub_futures[master] = (future, values) return True # Loop done and didn't exit: wasn't 
sent, try again later return False def _reset_event_aggregation(self): self.job_rets = {} self.raw_events = [] def reconnect_event_bus(self, something): future = self.local.event.set_event_handler(self._process_event) self.io_loop.add_future(future, self.reconnect_event_bus) # Syndic Tune In def tune_in(self): ''' Lock onto the publisher. This is the main event loop for the syndic ''' self._spawn_syndics() # Instantiate the local client self.local = salt.client.get_local_client( self.opts['_minion_conf_file'], io_loop=self.io_loop) self.local.event.subscribe('') log.debug('SyndicManager \'%s\' trying to tune in', self.opts['id']) # register the event sub to the poller self.job_rets = {} self.raw_events = [] self._reset_event_aggregation() future = self.local.event.set_event_handler(self._process_event) self.io_loop.add_future(future, self.reconnect_event_bus) # forward events every syndic_event_forward_timeout self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events, self.opts['syndic_event_forward_timeout'] * 1000, ) self.forward_events.start() # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() self.io_loop.start() def _process_event(self, raw): # TODO: cleanup: Move down into event class mtag, data = self.local.event.unpack(raw, self.local.event.serial) log.trace('Got event %s', mtag) # pylint: disable=no-member tag_parts = mtag.split('/') if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \ salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \ 'return' in data: if 'jid' not in data: # Not a job return return if self.syndic_mode == 'cluster' and data.get('master_id', 0) == self.opts.get('master_id', 1): log.debug('Return received with matching master_id, not forwarding') return master = data.get('master_id') jdict = self.job_rets.setdefault(master, {}).setdefault(mtag, {}) if not jdict: jdict['__fun__'] = data.get('fun') jdict['__jid__'] = data['jid'] jdict['__load__'] = {} fstr = 
'{0}.get_load'.format(self.opts['master_job_cache']) # Only need to forward each load once. Don't hit the disk # for every minion return! if data['jid'] not in self.jid_forward_cache: jdict['__load__'].update( self.mminion.returners[fstr](data['jid']) ) self.jid_forward_cache.add(data['jid']) if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']: # Pop the oldest jid from the cache tmp = sorted(list(self.jid_forward_cache)) tmp.pop(0) self.jid_forward_cache = set(tmp) if master is not None: # __'s to make sure it doesn't print out on the master cli jdict['__master_id__'] = master ret = {} for key in 'return', 'retcode', 'success': if key in data: ret[key] = data[key] jdict[data['id']] = ret else: # TODO: config to forward these? If so we'll have to keep track of who # has seen them # if we are the top level masters-- don't forward all the minion events if self.syndic_mode == 'sync': # Add generic event aggregation here if 'retcode' not in data: self.raw_events.append({'data': data, 'tag': mtag}) def _forward_events(self): log.trace('Forwarding events') # pylint: disable=no-member if self.raw_events: events = self.raw_events self.raw_events = [] self._call_syndic('_fire_master', kwargs={'events': events, 'pretag': tagify(self.opts['id'], base='syndic'), 'timeout': self._return_retry_timer(), 'sync': False, }, ) if self.delayed: res = self._return_pub_syndic(self.delayed) if res: self.delayed = [] for master in list(six.iterkeys(self.job_rets)): values = list(six.itervalues(self.job_rets[master])) res = self._return_pub_syndic(values, master_id=master) if res: del self.job_rets[master]
saltstack/salt
salt/minion.py
SyndicManager.tune_in
python
def tune_in(self): ''' Lock onto the publisher. This is the main event loop for the syndic ''' self._spawn_syndics() # Instantiate the local client self.local = salt.client.get_local_client( self.opts['_minion_conf_file'], io_loop=self.io_loop) self.local.event.subscribe('') log.debug('SyndicManager \'%s\' trying to tune in', self.opts['id']) # register the event sub to the poller self.job_rets = {} self.raw_events = [] self._reset_event_aggregation() future = self.local.event.set_event_handler(self._process_event) self.io_loop.add_future(future, self.reconnect_event_bus) # forward events every syndic_event_forward_timeout self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events, self.opts['syndic_event_forward_timeout'] * 1000, ) self.forward_events.start() # Make sure to gracefully handle SIGUSR1 enable_sigusr1_handler() self.io_loop.start()
Lock onto the publisher. This is the main event loop for the syndic
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L3324-L3352
[ "def get_local_client(\n c_path=os.path.join(syspaths.CONFIG_DIR, 'master'),\n mopts=None,\n skip_perm_errors=False,\n io_loop=None,\n auto_reconnect=False):\n '''\n .. versionadded:: 2014.7.0\n\n Read in the config and return the correct LocalClient object based on\n the configured transport\n\n :param IOLoop io_loop: io_loop used for events.\n Pass in an io_loop if you want asynchronous\n operation for obtaining events. Eg use of\n set_event_handler() API. Otherwise, operation\n will be synchronous.\n '''\n if mopts:\n opts = mopts\n else:\n # Late import to prevent circular import\n import salt.config\n opts = salt.config.client_config(c_path)\n\n # TODO: AIO core is separate from transport\n return LocalClient(\n mopts=opts,\n skip_perm_errors=skip_perm_errors,\n io_loop=io_loop,\n auto_reconnect=auto_reconnect)\n", "def enable_sigusr1_handler():\n '''\n Pretty print a stack trace to the console or a debug log under /tmp\n when any of the salt daemons such as salt-master are sent a SIGUSR1\n '''\n enable_sig_handler('SIGUSR1', _handle_sigusr1)\n # Also canonical BSD-way of printing progress is SIGINFO\n # which on BSD-derivatives can be sent via Ctrl+T\n enable_sig_handler('SIGINFO', _handle_sigusr1)\n", "def _spawn_syndics(self):\n '''\n Spawn all the coroutines which will sign in the syndics\n '''\n self._syndics = OrderedDict() # mapping of opts['master'] -> syndic\n masters = self.opts['master']\n if not isinstance(masters, list):\n masters = [masters]\n for master in masters:\n s_opts = copy.copy(self.opts)\n s_opts['master'] = master\n self._syndics[master] = self._connect_syndic(s_opts)\n", "def _reset_event_aggregation(self):\n self.job_rets = {}\n self.raw_events = []\n" ]
class SyndicManager(MinionBase): ''' Make a MultiMaster syndic minion, this minion will handle relaying jobs and returns from all minions connected to it to the list of masters it is connected to. Modes (controlled by `syndic_mode`: sync: This mode will synchronize all events and publishes from higher level masters cluster: This mode will only sync job publishes and returns Note: jobs will be returned best-effort to the requesting master. This also means (since we are using zmq) that if a job was fired and the master disconnects between the publish and return, that the return will end up in a zmq buffer in this Syndic headed to that original master. In addition, since these classes all seem to use a mix of blocking and non-blocking calls (with varying timeouts along the way) this daemon does not handle failure well, it will (under most circumstances) stall the daemon for ~15s trying to forward events to the down master ''' # time to connect to upstream master SYNDIC_CONNECT_TIMEOUT = 5 SYNDIC_EVENT_TIMEOUT = 5 def __init__(self, opts, io_loop=None): opts['loop_interval'] = 1 super(SyndicManager, self).__init__(opts) self.mminion = salt.minion.MasterMinion(opts) # sync (old behavior), cluster (only returns and publishes) self.syndic_mode = self.opts.get('syndic_mode', 'sync') self.syndic_failover = self.opts.get('syndic_failover', 'random') self.auth_wait = self.opts['acceptance_wait_time'] self.max_auth_wait = self.opts['acceptance_wait_time_max'] self._has_master = threading.Event() self.jid_forward_cache = set() if io_loop is None: install_zmq() self.io_loop = ZMQDefaultLoop.current() else: self.io_loop = io_loop # List of events self.raw_events = [] # Dict of rets: {master_id: {event_tag: job_ret, ...}, ...} self.job_rets = {} # List of delayed job_rets which was unable to send for some reason and will be resend to # any available master self.delayed = [] # Active pub futures: {master_id: (future, [job_ret, ...]), ...} self.pub_futures = {} def 
_spawn_syndics(self): ''' Spawn all the coroutines which will sign in the syndics ''' self._syndics = OrderedDict() # mapping of opts['master'] -> syndic masters = self.opts['master'] if not isinstance(masters, list): masters = [masters] for master in masters: s_opts = copy.copy(self.opts) s_opts['master'] = master self._syndics[master] = self._connect_syndic(s_opts) @tornado.gen.coroutine def _connect_syndic(self, opts): ''' Create a syndic, and asynchronously connect it to a master ''' auth_wait = opts['acceptance_wait_time'] failed = False while True: if failed: if auth_wait < self.max_auth_wait: auth_wait += self.auth_wait log.debug( "sleeping before reconnect attempt to %s [%d/%d]", opts['master'], auth_wait, self.max_auth_wait, ) yield tornado.gen.sleep(auth_wait) # TODO: log? log.debug( 'Syndic attempting to connect to %s', opts['master'] ) try: syndic = Syndic(opts, timeout=self.SYNDIC_CONNECT_TIMEOUT, safe=False, io_loop=self.io_loop, ) yield syndic.connect_master(failed=failed) # set up the syndic to handle publishes (specifically not event forwarding) syndic.tune_in_no_block() # Send an event to the master that the minion is live syndic.fire_master_syndic_start() log.info( 'Syndic successfully connected to %s', opts['master'] ) break except SaltClientError as exc: failed = True log.error( 'Error while bringing up syndic for multi-syndic. Is the ' 'master at %s responding?', opts['master'] ) except (KeyboardInterrupt, SystemExit): raise except Exception: failed = True log.critical( 'Unexpected error while connecting to %s', opts['master'], exc_info=True ) raise tornado.gen.Return(syndic) def _mark_master_dead(self, master): ''' Mark a master as dead. This will start the sign-in routine ''' # if its connected, mark it dead if self._syndics[master].done(): syndic = self._syndics[master].result() # pylint: disable=no-member self._syndics[master] = syndic.reconnect() else: # TODO: debug? 
log.info( 'Attempting to mark %s as dead, although it is already ' 'marked dead', master ) def _call_syndic(self, func, args=(), kwargs=None, master_id=None): ''' Wrapper to call a given func on a syndic, best effort to get the one you asked for ''' if kwargs is None: kwargs = {} successful = False # Call for each master for master, syndic_future in self.iter_master_options(master_id): if not syndic_future.done() or syndic_future.exception(): log.error( 'Unable to call %s on %s, that syndic is not connected', func, master ) continue try: getattr(syndic_future.result(), func)(*args, **kwargs) successful = True except SaltClientError: log.error( 'Unable to call %s on %s, trying another...', func, master ) self._mark_master_dead(master) if not successful: log.critical('Unable to call %s on any masters!', func) def _return_pub_syndic(self, values, master_id=None): ''' Wrapper to call the '_return_pub_multi' a syndic, best effort to get the one you asked for ''' func = '_return_pub_multi' for master, syndic_future in self.iter_master_options(master_id): if not syndic_future.done() or syndic_future.exception(): log.error( 'Unable to call %s on %s, that syndic is not connected', func, master ) continue future, data = self.pub_futures.get(master, (None, None)) if future is not None: if not future.done(): if master == master_id: # Targeted master previous send not done yet, call again later return False else: # Fallback master is busy, try the next one continue elif future.exception(): # Previous execution on this master returned an error log.error( 'Unable to call %s on %s, trying another...', func, master ) self._mark_master_dead(master) del self.pub_futures[master] # Add not sent data to the delayed list and try the next master self.delayed.extend(data) continue future = getattr(syndic_future.result(), func)(values, '_syndic_return', timeout=self._return_retry_timer(), sync=False) self.pub_futures[master] = (future, values) return True # Loop done and didn't exit: wasn't 
sent, try again later return False def iter_master_options(self, master_id=None): ''' Iterate (in order) over your options for master ''' masters = list(self._syndics.keys()) if self.opts['syndic_failover'] == 'random': shuffle(masters) if master_id not in self._syndics: master_id = masters.pop(0) else: masters.remove(master_id) while True: yield master_id, self._syndics[master_id] if not masters: break master_id = masters.pop(0) def _reset_event_aggregation(self): self.job_rets = {} self.raw_events = [] def reconnect_event_bus(self, something): future = self.local.event.set_event_handler(self._process_event) self.io_loop.add_future(future, self.reconnect_event_bus) # Syndic Tune In def _process_event(self, raw): # TODO: cleanup: Move down into event class mtag, data = self.local.event.unpack(raw, self.local.event.serial) log.trace('Got event %s', mtag) # pylint: disable=no-member tag_parts = mtag.split('/') if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \ salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \ 'return' in data: if 'jid' not in data: # Not a job return return if self.syndic_mode == 'cluster' and data.get('master_id', 0) == self.opts.get('master_id', 1): log.debug('Return received with matching master_id, not forwarding') return master = data.get('master_id') jdict = self.job_rets.setdefault(master, {}).setdefault(mtag, {}) if not jdict: jdict['__fun__'] = data.get('fun') jdict['__jid__'] = data['jid'] jdict['__load__'] = {} fstr = '{0}.get_load'.format(self.opts['master_job_cache']) # Only need to forward each load once. Don't hit the disk # for every minion return! 
if data['jid'] not in self.jid_forward_cache: jdict['__load__'].update( self.mminion.returners[fstr](data['jid']) ) self.jid_forward_cache.add(data['jid']) if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']: # Pop the oldest jid from the cache tmp = sorted(list(self.jid_forward_cache)) tmp.pop(0) self.jid_forward_cache = set(tmp) if master is not None: # __'s to make sure it doesn't print out on the master cli jdict['__master_id__'] = master ret = {} for key in 'return', 'retcode', 'success': if key in data: ret[key] = data[key] jdict[data['id']] = ret else: # TODO: config to forward these? If so we'll have to keep track of who # has seen them # if we are the top level masters-- don't forward all the minion events if self.syndic_mode == 'sync': # Add generic event aggregation here if 'retcode' not in data: self.raw_events.append({'data': data, 'tag': mtag}) def _forward_events(self): log.trace('Forwarding events') # pylint: disable=no-member if self.raw_events: events = self.raw_events self.raw_events = [] self._call_syndic('_fire_master', kwargs={'events': events, 'pretag': tagify(self.opts['id'], base='syndic'), 'timeout': self._return_retry_timer(), 'sync': False, }, ) if self.delayed: res = self._return_pub_syndic(self.delayed) if res: self.delayed = [] for master in list(six.iterkeys(self.job_rets)): values = list(six.itervalues(self.job_rets[master])) res = self._return_pub_syndic(values, master_id=master) if res: del self.job_rets[master]
saltstack/salt
salt/minion.py
ProxyMinion._post_master_init
python
def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check Minion._post_master_init to see if those changes need to be propagated. ProxyMinions need a significantly different post master setup, which is why the differences are not factored out into separate helper functions. ''' mp_call = _metaproxy_call(self.opts, 'post_master_init') return mp_call(self, master)
Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check Minion._post_master_init to see if those changes need to be propagated. ProxyMinions need a significantly different post master setup, which is why the differences are not factored out into separate helper functions.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L3471-L3486
null
class ProxyMinion(Minion): ''' This class instantiates a 'proxy' minion--a minion that does not manipulate the host it runs on, but instead manipulates a device that cannot run a minion. ''' # TODO: better name... @tornado.gen.coroutine def _target_load(self, load): ''' Verify that the publication is valid and applies to this minion ''' mp_call = _metaproxy_call(self.opts, 'target_load') return mp_call(self, load) def _handle_payload(self, payload): mp_call = _metaproxy_call(self.opts, 'handle_payload') return mp_call(self, payload) @tornado.gen.coroutine def _handle_decoded_payload(self, data): mp_call = _metaproxy_call(self.opts, 'handle_decoded_payload') return mp_call(self, data) @classmethod def _target(cls, minion_instance, opts, data, connected): mp_call = _metaproxy_call(opts, 'target') return mp_call(cls, minion_instance, opts, data, connected) @classmethod def _thread_return(cls, minion_instance, opts, data): mp_call = _metaproxy_call(opts, 'thread_return') return mp_call(cls, minion_instance, opts, data) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): mp_call = _metaproxy_call(opts, 'thread_multi_return') return mp_call(cls, minion_instance, opts, data)
saltstack/salt
salt/minion.py
ProxyMinion._target_load
python
def _target_load(self, load): ''' Verify that the publication is valid and applies to this minion ''' mp_call = _metaproxy_call(self.opts, 'target_load') return mp_call(self, load)
Verify that the publication is valid and applies to this minion
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L3488-L3493
null
class ProxyMinion(Minion): ''' This class instantiates a 'proxy' minion--a minion that does not manipulate the host it runs on, but instead manipulates a device that cannot run a minion. ''' # TODO: better name... @tornado.gen.coroutine def _post_master_init(self, master): ''' Function to finish init after connecting to a master This is primarily loading modules, pillars, etc. (since they need to know which master they connected to) If this function is changed, please check Minion._post_master_init to see if those changes need to be propagated. ProxyMinions need a significantly different post master setup, which is why the differences are not factored out into separate helper functions. ''' mp_call = _metaproxy_call(self.opts, 'post_master_init') return mp_call(self, master) def _handle_payload(self, payload): mp_call = _metaproxy_call(self.opts, 'handle_payload') return mp_call(self, payload) @tornado.gen.coroutine def _handle_decoded_payload(self, data): mp_call = _metaproxy_call(self.opts, 'handle_decoded_payload') return mp_call(self, data) @classmethod def _target(cls, minion_instance, opts, data, connected): mp_call = _metaproxy_call(opts, 'target') return mp_call(cls, minion_instance, opts, data, connected) @classmethod def _thread_return(cls, minion_instance, opts, data): mp_call = _metaproxy_call(opts, 'thread_return') return mp_call(cls, minion_instance, opts, data) @classmethod def _thread_multi_return(cls, minion_instance, opts, data): mp_call = _metaproxy_call(opts, 'thread_multi_return') return mp_call(cls, minion_instance, opts, data)
saltstack/salt
salt/minion.py
SProxyMinion.gen_modules
python
def gen_modules(self, initial_load=False): ''' Tell the minion to reload the execution modules CLI Example: .. code-block:: bash salt '*' sys.reload_modules ''' self.opts['grains'] = salt.loader.grains(self.opts) self.opts['pillar'] = salt.pillar.get_pillar( self.opts, self.opts['grains'], self.opts['id'], saltenv=self.opts['saltenv'], pillarenv=self.opts.get('pillarenv'), ).compile_pillar() if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts: errmsg = ( 'No "proxy" configuration key found in pillar or opts ' 'dictionaries for id {id}. Check your pillar/options ' 'configuration and contents. Salt-proxy aborted.' ).format(id=self.opts['id']) log.error(errmsg) self._running = False raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg) if 'proxy' not in self.opts: self.opts['proxy'] = self.opts['pillar']['proxy'] # Then load the proxy module self.proxy = salt.loader.proxy(self.opts) self.utils = salt.loader.utils(self.opts, proxy=self.proxy) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=False, proxy=self.proxy) self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy) self.matchers = salt.loader.matchers(self.opts) self.functions['sys.reload_modules'] = self.gen_modules self.executors = salt.loader.executors(self.opts, self.functions, proxy=self.proxy) fq_proxyname = self.opts['proxy']['proxytype'] # we can then sync any proxymodules down from the master # we do a sync_all here in case proxy code was installed by # SPM or was manually placed in /srv/salt/_modules etc. 
self.functions['saltutil.sync_all'](saltenv=self.opts['saltenv']) self.functions.pack['__proxy__'] = self.proxy self.proxy.pack['__salt__'] = self.functions self.proxy.pack['__ret__'] = self.returners self.proxy.pack['__pillar__'] = self.opts['pillar'] # Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__ self.utils = salt.loader.utils(self.opts, proxy=self.proxy) self.proxy.pack['__utils__'] = self.utils # Reload all modules so all dunder variables are injected self.proxy.reload_modules() if ('{0}.init'.format(fq_proxyname) not in self.proxy or '{0}.shutdown'.format(fq_proxyname) not in self.proxy): errmsg = 'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname) + \ 'Check your proxymodule. Salt-proxy aborted.' log.error(errmsg) self._running = False raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg) self.module_executors = self.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])() proxy_init_fn = self.proxy[fq_proxyname + '.init'] proxy_init_fn(self.opts) self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy) # Sync the grains here so the proxy can communicate them to the master self.functions['saltutil.sync_grains'](saltenv='base') self.grains_cache = self.opts['grains'] self.ready = True
Tell the minion to reload the execution modules CLI Example: .. code-block:: bash salt '*' sys.reload_modules
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/minion.py#L3528-L3607
[ "def get_pillar(opts, grains, minion_id, saltenv=None, ext=None, funcs=None,\n pillar_override=None, pillarenv=None, extra_minion_data=None):\n '''\n Return the correct pillar driver based on the file_client option\n '''\n file_client = opts['file_client']\n if opts.get('master_type') == 'disable' and file_client == 'remote':\n file_client = 'local'\n ptype = {\n 'remote': RemotePillar,\n 'local': Pillar\n }.get(file_client, Pillar)\n # If local pillar and we're caching, run through the cache system first\n log.debug('Determining pillar cache')\n if opts['pillar_cache']:\n log.info('Compiling pillar from cache')\n log.debug('get_pillar using pillar cache with ext: %s', ext)\n return PillarCache(opts, grains, minion_id, saltenv, ext=ext, functions=funcs,\n pillar_override=pillar_override, pillarenv=pillarenv)\n return ptype(opts, grains, minion_id, saltenv, ext, functions=funcs,\n pillar_override=pillar_override, pillarenv=pillarenv,\n extra_minion_data=extra_minion_data)\n", "def compile_pillar(self, *args, **kwargs): # Will likely just be pillar_dirs\n '''\n Compile pillar and set it to the cache, if not found.\n\n :param args:\n :param kwargs:\n :return:\n '''\n log.debug('Scanning pillar cache for information about minion %s and pillarenv %s', self.minion_id, self.pillarenv)\n log.debug('Scanning cache for minion %s: %s', self.minion_id, self.cache[self.minion_id] or '*empty*')\n\n # Check the cache!\n if self.minion_id in self.cache: # Keyed by minion_id\n # TODO Compare grains, etc?\n if self.pillarenv in self.cache[self.minion_id]:\n # We have a cache hit! Send it back.\n log.debug('Pillar cache hit for minion %s and pillarenv %s', self.minion_id, self.pillarenv)\n pillar_data = self.cache[self.minion_id][self.pillarenv]\n else:\n # We found the minion but not the env. 
Store it.\n pillar_data = self.fetch_pillar()\n self.cache[self.minion_id][self.pillarenv] = pillar_data\n self.cache.store()\n log.debug('Pillar cache miss for pillarenv %s for minion %s', self.pillarenv, self.minion_id)\n else:\n # We haven't seen this minion yet in the cache. Store it.\n pillar_data = self.fetch_pillar()\n self.cache[self.minion_id] = {self.pillarenv: pillar_data}\n log.debug('Pillar cache has been added for minion %s', self.minion_id)\n log.debug('Current pillar cache: %s', self.cache[self.minion_id])\n\n # we dont want the pillar_override baked into the cached fetch_pillar from above\n if self.pillar_override:\n pillar_data = merge(\n pillar_data,\n self.pillar_override,\n self.opts.get('pillar_source_merging_strategy', 'smart'),\n self.opts.get('renderer', 'yaml'),\n self.opts.get('pillar_merge_lists', False))\n pillar_data.update(self.pillar_override)\n\n return pillar_data\n" ]
class SProxyMinion(SMinion): ''' Create an object that has loaded all of the minion module functions, grains, modules, returners etc. The SProxyMinion allows developers to generate all of the salt minion functions and present them with these functions for general use. '''
saltstack/salt
salt/states/pkg.py
_get_comparison_spec
python
def _get_comparison_spec(pkgver): ''' Return a tuple containing the comparison operator and the version. If no comparison operator was passed, the comparison is assumed to be an "equals" comparison, and "==" will be the operator returned. ''' oper, verstr = salt.utils.pkg.split_comparison(pkgver.strip()) if oper in ('=', ''): oper = '==' return oper, verstr
Return a tuple containing the comparison operator and the version. If no comparison operator was passed, the comparison is assumed to be an "equals" comparison, and "==" will be the operator returned.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pkg.py#L153-L162
null
# -*- coding: utf-8 -*- ''' Installation of packages using OS package managers such as yum or apt-get ========================================================================= .. note:: On minions running systemd>=205, as of version 2015.8.12, 2016.3.3, and 2016.11.0, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing the package manager commands spawned by Salt, when Salt updates itself (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.use_scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Salt can manage software packages via the pkg state module, packages can be set up to be installed, latest, removed and purged. Package management declarations are typically rather simple: .. code-block:: yaml vim: pkg.installed A more involved example involves pulling from a custom repository. .. code-block:: yaml base: pkgrepo.managed: - humanname: Logstash PPA - name: ppa:wolfnet/logstash - dist: precise - file: /etc/apt/sources.list.d/logstash.list - keyid: 28B04E4A - keyserver: keyserver.ubuntu.com logstash: pkg.installed: - fromrepo: ppa:wolfnet/logstash Multiple packages can also be installed with the use of the pkgs state module .. code-block:: yaml dotdeb.repo: pkgrepo.managed: - humanname: Dotdeb - name: deb http://packages.dotdeb.org wheezy-php55 all - dist: wheezy-php55 - file: /etc/apt/sources.list.d/dotbeb.list - keyid: 89DF5277 - keyserver: keys.gnupg.net - refresh_db: true php.packages: pkg.installed: - fromrepo: wheezy-php55 - pkgs: - php5-fpm - php5-cli - php5-curl .. 
warning:: Package names are currently case-sensitive. If the minion is using a package manager which is not case-sensitive (such as :mod:`pkgng <salt.modules.pkgng>`), then this state will fail if the proper case is not used. This will be addressed in a future release of Salt. ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import fnmatch import logging import os import re # Import Salt libs import salt.utils.pkg import salt.utils.platform import salt.utils.versions from salt.output import nested from salt.utils.functools import namespaced_function as _namespaced_function from salt.utils.odict import OrderedDict as _OrderedDict from salt.exceptions import ( CommandExecutionError, MinionError, SaltInvocationError ) from salt.modules.pkg_resource import _repack_pkgs # Import 3rd-party libs from salt.ext import six # pylint: disable=invalid-name _repack_pkgs = _namespaced_function(_repack_pkgs, globals()) if salt.utils.platform.is_windows(): # pylint: disable=import-error,no-name-in-module,unused-import from salt.ext.six.moves.urllib.parse import urlparse as _urlparse from salt.exceptions import SaltRenderError import collections import datetime import errno import time from functools import cmp_to_key # pylint: disable=import-error # pylint: enable=unused-import from salt.modules.win_pkg import _get_package_info from salt.modules.win_pkg import get_repo_data from salt.modules.win_pkg import _get_repo_details from salt.modules.win_pkg import _refresh_db_conditional from salt.modules.win_pkg import refresh_db from salt.modules.win_pkg import genrepo from salt.modules.win_pkg import _repo_process_pkg_sls from salt.modules.win_pkg import _get_latest_pkg_version from salt.modules.win_pkg import _reverse_cmp_pkg_versions _get_package_info = _namespaced_function(_get_package_info, globals()) get_repo_data = _namespaced_function(get_repo_data, globals()) _get_repo_details = \ _namespaced_function(_get_repo_details, globals()) 
_refresh_db_conditional = \ _namespaced_function(_refresh_db_conditional, globals()) refresh_db = _namespaced_function(refresh_db, globals()) genrepo = _namespaced_function(genrepo, globals()) _repo_process_pkg_sls = \ _namespaced_function(_repo_process_pkg_sls, globals()) _get_latest_pkg_version = \ _namespaced_function(_get_latest_pkg_version, globals()) _reverse_cmp_pkg_versions = \ _namespaced_function(_reverse_cmp_pkg_versions, globals()) # The following imports are used by the namespaced win_pkg funcs # and need to be included in their globals. # pylint: disable=import-error,unused-import from salt.utils.versions import LooseVersion # pylint: enable=import-error,unused-import # pylint: enable=invalid-name log = logging.getLogger(__name__) def __virtual__(): ''' Only make these states available if a pkg provider has been detected or assigned for this minion ''' return 'pkg.install' in __salt__ def _parse_version_string(version_conditions_string): ''' Returns a list of two-tuples containing (operator, version). ''' result = [] version_conditions_string = version_conditions_string.strip() if not version_conditions_string: return result for version_condition in version_conditions_string.split(','): operator_and_version = _get_comparison_spec(version_condition) result.append(operator_and_version) return result def _fulfills_version_string(installed_versions, version_conditions_string, ignore_epoch=False, allow_updates=False): ''' Returns True if any of the installed versions match the specified version conditions, otherwise returns False. installed_versions The installed versions version_conditions_string The string containing all version conditions. E.G. 1.2.3-4 >=1.2.3-4 >=1.2.3-4, <2.3.4-5 >=1.2.3-4, <2.3.4-5, !=1.2.4-1 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. 
allow_updates : False Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. (Only applicable if only one strict version condition is specified E.G. version: 2.0.6~ubuntu3) ''' version_conditions = _parse_version_string(version_conditions_string) for installed_version in installed_versions: fullfills_all = True for operator, version_string in version_conditions: if allow_updates and len(version_conditions) == 1 and operator == '==': operator = '>=' fullfills_all = fullfills_all and _fulfills_version_spec([installed_version], operator, version_string, ignore_epoch=ignore_epoch) if fullfills_all: return True return False def _fulfills_version_spec(versions, oper, desired_version, ignore_epoch=False): ''' Returns True if any of the installed versions match the specified version, otherwise returns False ''' cmp_func = __salt__.get('pkg.version_cmp') # stripping "with_origin" dict wrapper if salt.utils.platform.is_freebsd(): if isinstance(versions, dict) and 'version' in versions: versions = versions['version'] for ver in versions: if (oper == '==' and fnmatch.fnmatch(ver, desired_version)) \ or salt.utils.versions.compare(ver1=ver, oper=oper, ver2=desired_version, cmp_func=cmp_func, ignore_epoch=ignore_epoch): return True return False def _find_unpurge_targets(desired, **kwargs): ''' Find packages which are marked to be purged but can't yet be removed because they are dependencies for other installed packages. These are the packages which will need to be 'unpurged' because they are part of pkg.installed states. This really just applies to Debian-based Linuxes. 
''' return [ x for x in desired if x in __salt__['pkg.list_pkgs'](purge_desired=True, **kwargs) ] def _find_download_targets(name=None, version=None, pkgs=None, normalize=True, skip_suggestions=False, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.downloaded and discover what packages need to be downloaded. Return a dict of packages to download. ''' cur_pkgs = __salt__['pkg.list_downloaded'](**kwargs) if pkgs: to_download = _repack_pkgs(pkgs, normalize=normalize) if not to_download: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. See ' 'minion log.'} else: if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_download = {_normalize_name(name): version} else: to_download = {name: version} cver = cur_pkgs.get(name, {}) if name in to_download: # Package already downloaded, no need to download again if cver and version in cver: return {'name': name, 'changes': {}, 'result': True, 'comment': 'Version {0} of package \'{1}\' is already ' 'downloaded'.format(version, name)} # if cver is not an empty string, the package is already downloaded elif cver and version is None: # The package is downloaded return {'name': name, 'changes': {}, 'result': True, 'comment': 'Package {0} is already ' 'downloaded'.format(name)} version_spec = False if not skip_suggestions: try: problems = _preflight_check(to_download, **kwargs) except CommandExecutionError: pass else: comments = [] if problems.get('no_suggest'): comments.append( 'The following package(s) were not found, and no ' 'possible matches were found in the package db: ' '{0}'.format( ', '.join(sorted(problems['no_suggest'])) ) ) if problems.get('suggest'): for pkgname, suggestions in \ six.iteritems(problems['suggest']): comments.append( 'Package \'{0}\' not found (possible matches: ' '{1})'.format(pkgname, ', '.join(suggestions)) ) if comments: if len(comments) > 1: comments.append('') return 
{'name': name, 'changes': {}, 'result': False, 'comment': '. '.join(comments).rstrip()} # Find out which packages will be targeted in the call to pkg.download # Check current downloaded versions against specified versions targets = {} problems = [] for pkgname, pkgver in six.iteritems(to_download): cver = cur_pkgs.get(pkgname, {}) # Package not yet downloaded, so add to targets if not cver: targets[pkgname] = pkgver continue # No version specified but package is already downloaded elif cver and not pkgver: continue version_spec = True try: if not _fulfills_version_string(cver.keys(), pkgver, ignore_epoch=ignore_epoch): targets[pkgname] = pkgver except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already downloaded msg = ( 'All specified packages{0} are already downloaded' .format(' (matching specified versions)' if version_spec else '') ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_advisory_targets(name=None, advisory_ids=None, **kwargs): ''' Inspect the arguments to pkg.patch_installed and discover what advisory patches need to be installed. Return a dict of advisory patches to install. 
'''
    cur_patches = __salt__['pkg.list_installed_patches'](**kwargs)
    if advisory_ids:
        to_download = advisory_ids
    else:
        to_download = [name]
        if cur_patches.get(name, {}):
            # Advisory patch already installed, no need to install it again
            return {'name': name,
                    'changes': {},
                    'result': True,
                    'comment': 'Advisory patch {0} is already '
                               'installed'.format(name)}

    # Find out which advisory patches will be targeted in the call to pkg.install
    targets = []
    for patch_name in to_download:
        cver = cur_patches.get(patch_name, {})
        # Advisory patch not yet installed, so add to targets
        # NOTE(review): patches that ARE already installed are silently
        # skipped here (no comment is emitted for them); when advisory_ids is
        # passed, the early "already installed" return above only ever checks
        # `name`, never the individual ids — confirm this is intended.
        if not cver:
            targets.append(patch_name)
            continue

    if not targets:
        # All specified packages are already downloaded
        msg = ('All specified advisory patches are already installed')
        return {'name': name,
                'changes': {},
                'result': True,
                'comment': msg}

    return targets


def _find_remove_targets(name=None,
                         version=None,
                         pkgs=None,
                         normalize=True,
                         ignore_epoch=False,
                         **kwargs):
    '''
    Inspect the arguments to pkg.removed and discover what packages need to
    be removed. Return a dict of packages to remove.
    '''
    if __grains__['os'] == 'FreeBSD':
        # FreeBSD's pkg.list_pkgs can report package origins, which are
        # needed below to match `category/port`-style names.
        kwargs['with_origin'] = True
    cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
    if pkgs:
        to_remove = _repack_pkgs(pkgs, normalize=normalize)

        if not to_remove:
            # Badly-formatted SLS
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'Invalidly formatted pkgs parameter. See '
                               'minion log.'}
    else:
        # Single-package invocation: normalize the name if the provider
        # supports it, otherwise use the name as-is.
        _normalize_name = \
            __salt__.get('pkg.normalize_name', lambda pkgname: pkgname)
        to_remove = {_normalize_name(name): version}

    version_spec = False
    # Find out which packages will be targeted in the call to pkg.remove
    # Check current versions against specified versions
    targets = []
    problems = []
    for pkgname, pkgver in six.iteritems(to_remove):
        # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names
        origin = bool(re.search('/', pkgname))

        if __grains__['os'] == 'FreeBSD' and origin:
            cver = [k for k, v in six.iteritems(cur_pkgs)
                    if v['origin'] == pkgname]
        else:
            cver = cur_pkgs.get(pkgname, [])

        # Package not installed, no need to remove
        if not cver:
            continue
        # No version specified and pkg is installed
        elif __salt__['pkg_resource.version_clean'](pkgver) is None:
            targets.append(pkgname)
            continue
        version_spec = True
        try:
            # Only remove if the installed version matches the requested
            # version specification.
            if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch):
                targets.append(pkgname)
            else:
                log.debug(
                    'Current version (%s) did not match desired version '
                    'specification (%s), will not remove',
                    cver, pkgver
                )
        except CommandExecutionError as exc:
            problems.append(exc.strerror)
            continue

    if problems:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': ' '.join(problems)}

    if not targets:
        # All specified packages are already absent
        msg = 'All specified packages{0} are already absent'.format(
            ' (matching specified versions)' if version_spec else ''
        )
        return {'name': name,
                'changes': {},
                'result': True,
                'comment': msg}

    return targets


def _find_install_targets(name=None,
                          version=None,
                          pkgs=None,
                          sources=None,
                          skip_suggestions=False,
                          pkg_verify=False,
                          normalize=True,
                          ignore_epoch=False,
                          reinstall=False,
                          refresh=False,
                          **kwargs):
    '''
    Inspect the arguments to pkg.installed and discover what packages need to
    be installed.
Return a dict of desired packages
    '''
    was_refreshed = False

    if all((pkgs, sources)):
        # The two multi-package modes are mutually exclusive.
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'Only one of "pkgs" and "sources" is permitted.'}

    # dict for packages that fail pkg.verify and their altered files
    altered_files = {}
    # Get the ignore_types list if any from the pkg_verify argument
    if isinstance(pkg_verify, list) \
            and any(x.get('ignore_types') is not None
                    for x in pkg_verify
                    if isinstance(x, _OrderedDict)
                    and 'ignore_types' in x):
        ignore_types = next(x.get('ignore_types')
                            for x in pkg_verify
                            if 'ignore_types' in x)
    else:
        ignore_types = []

    # Get the verify_options list if any from the pkg_verify argument
    if isinstance(pkg_verify, list) \
            and any(x.get('verify_options') is not None
                    for x in pkg_verify
                    if isinstance(x, _OrderedDict)
                    and 'verify_options' in x):
        verify_options = next(x.get('verify_options')
                              for x in pkg_verify
                              if 'verify_options' in x)
    else:
        verify_options = []

    if __grains__['os'] == 'FreeBSD':
        # Needed to match `category/port`-style (origin) package names below.
        kwargs['with_origin'] = True

    if salt.utils.platform.is_windows():
        # Windows requires a refresh to establish a pkg db if refresh=True, so
        # add it to the kwargs.
        kwargs['refresh'] = refresh

    resolve_capabilities = kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__
    try:
        cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
        cur_prov = resolve_capabilities and __salt__['pkg.list_provides'](**kwargs) or dict()
    except CommandExecutionError as exc:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': exc.strerror}

    if salt.utils.platform.is_windows() and kwargs.pop('refresh', False):
        # We already refreshed when we called pkg.list_pkgs
        was_refreshed = True
        refresh = False

    if any((pkgs, sources)):
        if pkgs:
            desired = _repack_pkgs(pkgs, normalize=normalize)
        elif sources:
            desired = __salt__['pkg_resource.pack_sources'](
                sources,
                normalize=normalize,
            )

        if not desired:
            # Badly-formatted SLS
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'Invalidly formatted \'{0}\' parameter. See '
                               'minion log.'.format('pkgs' if pkgs
                                                    else 'sources')}

        to_unpurge = _find_unpurge_targets(desired, **kwargs)
    else:
        # Single-package mode (bare `name`, optional `version`).
        if salt.utils.platform.is_windows():
            pkginfo = _get_package_info(name, saltenv=kwargs['saltenv'])
            if not pkginfo:
                return {'name': name,
                        'changes': {},
                        'result': False,
                        'comment': 'Package {0} not found in the '
                                   'repository.'.format(name)}
            if version is None:
                version = _get_latest_pkg_version(pkginfo)

        if normalize:
            _normalize_name = \
                __salt__.get('pkg.normalize_name', lambda pkgname: pkgname)
            desired = {_normalize_name(name): version}
        else:
            desired = {name: version}

        to_unpurge = _find_unpurge_targets(desired, **kwargs)

        # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names
        origin = bool(re.search('/', name))

        if __grains__['os'] == 'FreeBSD' and origin:
            cver = [k for k, v in six.iteritems(cur_pkgs)
                    if v['origin'] == name]
        else:
            cver = cur_pkgs.get(name, [])

        if name not in to_unpurge:
            # Fast-path returns: nothing to do if the exact requested version
            # (or any version, when none was requested) is already installed
            # and no reinstall/verify was asked for.
            if version and version in cver \
                    and not reinstall \
                    and not pkg_verify:
                # The package is installed and is the correct version
                return {'name': name,
                        'changes': {},
                        'result': True,
                        'comment': 'Version {0} of package \'{1}\' is already '
                                   'installed'.format(version, name)}

            # if cver is not an empty string, the package is already installed
            elif cver and version is None \
                    and not reinstall \
                    and not pkg_verify:
                # The package is installed
                return {'name': name,
                        'changes': {},
                        'result': True,
                        'comment': 'Package {0} is already '
                                   'installed'.format(name)}

    version_spec = False
    if not sources:
        # Check for alternate package names if strict processing is not
        # enforced. Takes extra time. Disable for improved performance
        if not skip_suggestions:
            # Perform platform-specific pre-flight checks
            not_installed = dict([
                (name, version)
                for name, version in desired.items()
                if not (name in cur_pkgs and
                        (version is None or
                         _fulfills_version_string(cur_pkgs[name], version)))
            ])
            if not_installed:
                try:
                    problems = _preflight_check(not_installed, **kwargs)
                except CommandExecutionError:
                    # pkg.check_db not usable here; fall through and let the
                    # install attempt surface any real errors.
                    pass
                else:
                    comments = []
                    if problems.get('no_suggest'):
                        comments.append(
                            'The following package(s) were not found, and no '
                            'possible matches were found in the package db: '
                            '{0}'.format(
                                ', '.join(sorted(problems['no_suggest']))
                            )
                        )
                    if problems.get('suggest'):
                        for pkgname, suggestions in \
                                six.iteritems(problems['suggest']):
                            comments.append(
                                'Package \'{0}\' not found (possible matches: '
                                '{1})'.format(pkgname, ', '.join(suggestions))
                            )
                    if comments:
                        if len(comments) > 1:
                            comments.append('')
                        return {'name': name,
                                'changes': {},
                                'result': False,
                                'comment': '. '.join(comments).rstrip()}

    # Resolve the latest package version for any packages with "latest" in the
    # package version
    wants_latest = [] \
        if sources \
        else [x for x, y in six.iteritems(desired) if y == 'latest']
    if wants_latest:
        resolved_latest = __salt__['pkg.latest_version'](*wants_latest,
                                                         refresh=refresh,
                                                         **kwargs)
        if len(wants_latest) == 1:
            # pkg.latest_version returns a bare string for a single package
            # but a dict for several; normalize to a dict.
            resolved_latest = {wants_latest[0]: resolved_latest}
        if refresh:
            was_refreshed = True
            refresh = False

        # pkg.latest_version returns an empty string when the package is
        # up-to-date. So check the currently-installed packages. If found, the
        # resolved latest version will be the currently installed one from
        # cur_pkgs. If not found, then the package doesn't exist and the
        # resolved latest version will be None.
        for key in resolved_latest:
            if not resolved_latest[key]:
                if key in cur_pkgs:
                    resolved_latest[key] = cur_pkgs[key][-1]
                else:
                    resolved_latest[key] = None
        # Update the desired versions with the ones we resolved
        desired.update(resolved_latest)

    # Find out which packages will be targeted in the call to pkg.install
    targets = {}
    to_reinstall = {}
    problems = []
    warnings = []
    failed_verify = False
    for package_name, version_string in six.iteritems(desired):
        cver = cur_pkgs.get(package_name, [])
        if resolve_capabilities and not cver and package_name in cur_prov:
            # Requested name is a capability; look up the providing package.
            cver = cur_pkgs.get(cur_prov.get(package_name)[0], [])

        # Package not yet installed, so add to targets
        if not cver:
            targets[package_name] = version_string
            continue

        if sources:
            if reinstall:
                to_reinstall[package_name] = version_string
                continue
            elif 'lowpkg.bin_pkg_info' not in __salt__:
                continue
            # Metadata parser is available, cache the file and derive the
            # package's name and version
            err = 'Unable to cache {0}: {1}'
            try:
                cached_path = __salt__['cp.cache_file'](version_string, saltenv=kwargs['saltenv'])
            except CommandExecutionError as exc:
                problems.append(err.format(version_string, exc))
                continue
            if not cached_path:
                problems.append(err.format(version_string, 'file not found'))
                continue
            elif not os.path.exists(cached_path):
                problems.append('{0} does not exist on minion'.format(version_string))
                continue
            source_info = __salt__['lowpkg.bin_pkg_info'](cached_path)
            if source_info is None:
                warnings.append('Failed to parse metadata for {0}'.format(version_string))
                continue
            else:
                verstr = source_info['version']
        else:
            verstr = version_string
            if reinstall:
                to_reinstall[package_name] = version_string
                continue
            if not __salt__['pkg_resource.check_extra_requirements'](package_name, version_string):
                targets[package_name] = version_string
                continue
            # No version specified and pkg is installed
            elif __salt__['pkg_resource.version_clean'](version_string) is None:
                if (not reinstall) and pkg_verify:
                    try:
                        verify_result = __salt__['pkg.verify'](
                            package_name,
                            ignore_types=ignore_types,
                            verify_options=verify_options,
                            **kwargs
                        )
                    except (CommandExecutionError, SaltInvocationError) as exc:
                        failed_verify = exc.strerror
                        continue
                    if verify_result:
                        # Files altered on disk: schedule a reinstall.
                        to_reinstall[package_name] = version_string
                        altered_files[package_name] = verify_result
                continue

        version_fulfilled = False
        allow_updates = bool(not sources and kwargs.get('allow_updates'))
        try:
            version_fulfilled = _fulfills_version_string(cver,
                                                         verstr,
                                                         ignore_epoch=ignore_epoch,
                                                         allow_updates=allow_updates)
        except CommandExecutionError as exc:
            problems.append(exc.strerror)
            continue

        # Compare desired version against installed version.
        version_spec = True
        if not version_fulfilled:
            if reinstall:
                to_reinstall[package_name] = version_string
            else:
                version_conditions = _parse_version_string(version_string)
                if pkg_verify and any(oper == '==' for oper, version in version_conditions):
                    try:
                        verify_result = __salt__['pkg.verify'](
                            package_name,
                            ignore_types=ignore_types,
                            verify_options=verify_options,
                            **kwargs)
                    except (CommandExecutionError, SaltInvocationError) as exc:
                        failed_verify = exc.strerror
                        continue
                    if verify_result:
                        to_reinstall[package_name] = version_string
                        altered_files[package_name] = verify_result
                else:
                    log.debug(
                        'Current version (%s) did not match desired version '
                        'specification (%s), adding to installation targets',
                        cver, version_string
                    )
                    targets[package_name] = version_string

    if failed_verify:
        problems.append(failed_verify)

    if problems:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': ' '.join(problems)}

    if not any((targets, to_unpurge, to_reinstall)):
        # All specified packages are installed
        msg = 'All specified packages are already installed{0}'
        msg = msg.format(' and are at the desired version' if
                         version_spec and not sources else '')
        ret = {'name': name,
               'changes': {},
               'result': True,
               'comment': msg}
        if warnings:
            ret.setdefault('warnings', []).extend(warnings)
        return ret

    # Callers distinguish "nothing to do" (a dict) from "work to do" (this
    # 7-tuple) via try/except ValueError on unpacking.
    return (desired, targets, to_unpurge, to_reinstall, altered_files,
            warnings, was_refreshed)


def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None):
    '''
    Determine whether or not the installed packages match what was requested in
    the SLS file.

    Returns a 2-tuple of lists: (ok, failed) package names.
    '''
    ok = []
    failed = []
    if not new_caps:
        new_caps = dict()
    for pkgname, pkgver in desired.items():
        # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names.
        # Homebrew for Mac OSX does something similar with tap names
        # prefixing package names, separated with a slash.
        has_origin = '/' in pkgname

        if __grains__['os'] == 'FreeBSD' and has_origin:
            cver = [k for k, v in six.iteritems(new_pkgs) if v['origin'] == pkgname]
        elif __grains__['os'] == 'MacOS' and has_origin:
            cver = new_pkgs.get(pkgname, new_pkgs.get(pkgname.split('/')[-1]))
        elif __grains__['os'] == 'OpenBSD':
            cver = new_pkgs.get(pkgname.split('%')[0])
        elif __grains__['os_family'] == 'Debian':
            cver = new_pkgs.get(pkgname.split('=')[0])
        else:
            cver = new_pkgs.get(pkgname)

        if not cver and pkgname in new_caps:
            # Fall back to the package providing the requested capability.
            cver = new_pkgs.get(new_caps.get(pkgname)[0])

        if not cver:
            failed.append(pkgname)
            continue
        elif pkgver == 'latest':
            ok.append(pkgname)
            continue
        elif not __salt__['pkg_resource.version_clean'](pkgver):
            # No concrete version was requested; presence is enough.
            ok.append(pkgname)
            continue
        elif pkgver.endswith("*") and cver[0].startswith(pkgver[:-1]):
            # Trailing-wildcard version matches by prefix.
            ok.append(pkgname)
            continue
        if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch):
            ok.append(pkgname)
        else:
            failed.append(pkgname)
    return ok, failed


def _get_desired_pkg(name, desired):
    '''
    Helper function that retrieves and nicely formats the desired pkg (and
    version if specified) so that helpful information can be printed in the
    comment for the state.
'''
    if not desired[name] or desired[name].startswith(('<', '>', '=')):
        # Version already carries its own comparison operator (or is empty).
        oper = ''
    else:
        oper = '='
    return '{0}{1}{2}'.format(name, oper,
                              '' if not desired[name] else desired[name])


def _preflight_check(desired, fromrepo, **kwargs):
    '''
    Perform platform-specific checks on desired packages

    Returns a dict with 'suggest' (name -> list of suggested alternates) and
    'no_suggest' (names with no match at all); empty dict when the provider
    has no pkg.check_db.
    '''
    if 'pkg.check_db' not in __salt__:
        return {}
    ret = {'suggest': {}, 'no_suggest': []}
    pkginfo = __salt__['pkg.check_db'](
        *list(desired.keys()), fromrepo=fromrepo, **kwargs
    )
    for pkgname in pkginfo:
        if pkginfo[pkgname]['found'] is False:
            if pkginfo[pkgname]['suggestions']:
                ret['suggest'][pkgname] = pkginfo[pkgname]['suggestions']
            else:
                ret['no_suggest'].append(pkgname)
    return ret


def _nested_output(obj):
    '''
    Serialize obj and format for output
    '''
    # The nested outputter reads configuration from its module-level __opts__.
    nested.__opts__ = __opts__
    ret = nested.output(obj).rstrip()
    return ret


def _resolve_capabilities(pkgs, refresh=False, **kwargs):
    '''
    Resolve capabilities in ``pkgs`` and exchange them with real package
    names, when the result is distinct.
    This feature can be turned on while setting the parameter
    ``resolve_capabilities`` to True.

    Return the input dictionary with replaced capability names and as
    second return value a bool which say if a refresh need to be run.

    In case of ``resolve_capabilities`` is False (disabled) or not
    supported by the implementation the input is returned unchanged.
    '''
    if not pkgs or 'pkg.resolve_capabilities' not in __salt__:
        return pkgs, refresh

    # The provider performs the refresh itself, so no further refresh is
    # needed by the caller (hence the False).
    ret = __salt__['pkg.resolve_capabilities'](pkgs, refresh=refresh, **kwargs)
    return ret, False


def installed(
        name,
        version=None,
        refresh=None,
        fromrepo=None,
        skip_verify=False,
        skip_suggestions=False,
        pkgs=None,
        sources=None,
        allow_updates=False,
        pkg_verify=False,
        normalize=True,
        ignore_epoch=False,
        reinstall=False,
        update_holds=False,
        bypass_file=None,
        bypass_file_contains=None,
        **kwargs):
    '''
    Ensure that the package is installed, and that it is the correct version
    (if specified).

    :param str name:
        The name of the package to be installed.
This parameter is ignored if either "pkgs" or "sources" is used. Additionally, please note that this option can only be used to install packages from a software repository. To install a package file manually, use the "sources" option detailed below. :param str version: Install a specific version of a package. This option is ignored if "sources" is used. Currently, this option is supported for the following pkg providers: :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`pkgin <salt.modules.pkgin>`, :mod:`win_pkg <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`. The version number includes the release designation where applicable, to allow Salt to target a specific release of a given version. When in doubt, using the ``pkg.latest_version`` function for an uninstalled package will tell you the version available. .. code-block:: bash # salt myminion pkg.latest_version vim-enhanced myminion: 2:7.4.160-1.el7 .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the ``pkg.latest_version`` output above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. 
Also, while this function is not yet implemented for all pkg frontends, :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` will show all versions available in the various repositories for a given package, irrespective of whether or not it is installed. .. code-block:: bash # salt myminion pkg.list_repo_pkgs bash myminion: ---------- bash: - 4.2.46-21.el7_3 - 4.2.46-20.el7_2 This function was first added for :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` in 2014.1.0, and was expanded to :py:func:`Debian/Ubuntu <salt.modules.aptpkg.list_repo_pkgs>` and :py:func:`Arch Linux <salt.modules.pacman.list_repo_pkgs>`-based distros in the 2017.7.0 release. The version strings returned by either of these functions can be used as version specifiers in pkg states. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.installed: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 If the version given is the string ``latest``, the latest available package version will be installed à la ``pkg.latest``. **WILDCARD VERSIONS** As of the 2017.7.0 release, this state now supports wildcards in package versions for SUSE SLES/Leap/Tumbleweed, Debian/Ubuntu, RHEL/CentOS, Arch Linux, and their derivatives. Using wildcards can be useful for packages where the release name is built into the version in some way, such as for RHEL/CentOS which typically has version numbers like ``1.2.34-5.el7``. An example of the usage for this would be: .. code-block:: yaml mypkg: pkg.installed: - version: '1.2.34*' Keep in mind that using wildcard versions will result in a slower state run since Salt must gather the available versions of the specified packages and figure out which of them match the specified wildcard expression. :param bool refresh: This parameter controls whether or not the package repo database is updated prior to installing the requested package(s). 
If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before installing. If ``False``, the package database will *not* be refreshed before installing. If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - cache_valid_time: 300 - allow_updates: True - hold: False In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param str fromrepo: Specify a repository from which to install .. note:: Distros which use APT (Debian, Ubuntu, etc.) do not have a concept of repositories, in the same way as YUM-based distros do. When a source is added, it is assigned to a given release. Consider the following source configuration: .. code-block:: text deb http://ppa.launchpad.net/saltstack/salt/ubuntu precise main The packages provided by this source would be made available via the ``precise`` release, therefore ``fromrepo`` would need to be set to ``precise`` for Salt to install the package from this source. 
Having multiple sources in the same release may result in the default install candidate being newer than what is desired. If this is the case, the desired version must be specified using the ``version`` parameter. If the ``pkgs`` parameter is being used to install multiple packages in the same state, then instead of using ``version``, use the method of version specification described in the **Multiple Package Installation Options** section below. Running the shell command ``apt-cache policy pkgname`` on a minion can help elucidate the APT configuration and aid in properly configuring states: .. code-block:: bash root@saltmaster:~# salt ubuntu01 cmd.run 'apt-cache policy ffmpeg' ubuntu01: ffmpeg: Installed: (none) Candidate: 7:0.10.11-1~precise1 Version table: 7:0.10.11-1~precise1 0 500 http://ppa.launchpad.net/jon-severinsson/ffmpeg/ubuntu/ precise/main amd64 Packages 4:0.8.10-0ubuntu0.12.04.1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise-updates/main amd64 Packages 500 http://security.ubuntu.com/ubuntu/ precise-security/main amd64 Packages 4:0.8.1-0ubuntu1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise/main amd64 Packages The release is located directly after the source's URL. The actual release name is the part before the slash, so to install version **4:0.8.10-0ubuntu0.12.04.1** either ``precise-updates`` or ``precise-security`` could be used for the ``fromrepo`` value. :param bool skip_verify: Skip the GPG verification check for the package to be installed :param bool skip_suggestions: Force strict package naming. Disables lookup of package alternatives. .. versionadded:: 2014.1.1 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 :param bool allow_updates: Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). 
This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - allow_updates: True - hold: False :param bool pkg_verify: .. versionadded:: 2014.7.0 For requested packages that are already installed and would not be targeted for upgrade or downgrade, use pkg.verify to determine if any of the files installed by the package have been altered. If files have been altered, the reinstall option of pkg.install is used to force a reinstall. Types to ignore can be passed to pkg.verify. Additionally, ``verify_options`` can be used to modify further the behavior of pkg.verify. See examples below. Currently, this option is supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>`. Examples: .. code-block:: yaml httpd: pkg.installed: - version: 2.2.15-30.el6.centos - pkg_verify: True .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc - verify_options: - nodeps - nofiledigest :param list ignore_types: List of types to ignore when verifying the package .. versionadded:: 2014.7.0 :param list verify_options: List of additional options to pass when verifying the package. These options will be added to the ``rpm -V`` command, prepended with ``--`` (for example, when ``nodeps`` is passed in this option, ``rpm -V`` will be run with ``--nodeps``). .. versionadded:: 2016.11.0 :param bool normalize: Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml gpfs.gplbin-2.6.32-279.31.1.el6.x86_64: pkg.installed: - normalize: False :param bool ignore_epoch: When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.installed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the package would be installed, but the state would report as failed because the actual installed version would be ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 | **MULTIPLE PACKAGE INSTALLATION OPTIONS: (not supported in pkgng)** :param list pkgs: A list of packages to install from a software repository. All packages listed under ``pkgs`` will be installed via a single command. .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar - baz - hold: True ``NOTE:`` For :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`winrepo <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`, version numbers can be specified in the ``pkgs`` argument. For example: .. 
code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz Additionally, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`zypper <salt.modules.zypper>`, :mod:`yum/dnf <salt.modules.yumpkg>`, and :mod:`apt <salt.modules.aptpkg>` support the ``<``, ``<=``, ``>=``, and ``>`` operators for more control over what versions will be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: '>=1.2.3-4' - baz ``NOTE:`` When using comparison operators, the expression must be enclosed in quotes to avoid a YAML render error. With :mod:`ebuild <salt.modules.ebuild>` is also possible to specify a use flag list and/or if the given packages should be in package.accept_keywords file and/or the overlay from which you want the package to be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo: '~' - bar: '~>=1.2:slot::overlay[use,-otheruse]' - baz :param list sources: A list of packages to install, along with the source URI or local path from which to install each package. In the example below, ``foo``, ``bar``, ``baz``, etc. refer to the name of the package, as it would appear in the output of the ``pkg.version`` or ``pkg.list_pkgs`` salt CLI commands. .. code-block:: yaml mypkgs: pkg.installed: - sources: - foo: salt://rpms/foo.rpm - bar: http://somesite.org/bar.rpm - baz: ftp://someothersite.org/baz.rpm - qux: /minion/path/to/qux.rpm **PLATFORM-SPECIFIC ARGUMENTS** These are specific to each OS. If it does not apply to the execution module for your OS, it is ignored. :param bool hold: Force the package to be held at the current installed version. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2014.7.0 :param bool update_holds: If ``True``, and this function would update the package version, any packages which are being held will be temporarily unheld so that they can be updated. 
Otherwise, if this function attempts to update a held package, the held package(s) will be skipped and the state will fail. By default, this parameter is set to ``False``. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2016.11.0 :param list names: A list of packages to install from a software repository. Each package will be installed individually by the package manager. .. warning:: Unlike ``pkgs``, the ``names`` parameter cannot specify a version. In addition, it makes a separate call to the package management frontend to install each package, whereas ``pkgs`` makes just a single call. It is therefore recommended to use ``pkgs`` instead of ``names`` to install multiple packages, both for the additional features and the performance improvement that it brings. :param bool install_recommends: Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - install_recommends: False :param bool only_upgrade: Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. :param bool report_reboot_exit_codes: If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. 
code-block:: yaml ms vcpp installed: pkg.installed: - name: ms-vcpp - version: 10.0.40219 - report_reboot_exit_codes: False :param str bypass_file: If you wish to bypass the full package validation process, you can specify a file related to the installed package as a way to validate the package has already been installed. A good example would be a config file that is deployed with the package. Another bypass_file could be ``/run/salt-minion.pid``. .. code-block:: yaml install_ntp: pkg.installed: - name: ntp - bypass_file: /etc/ntp.conf The use case for this feature is when running salt at significant scale. Each state that has a requisite for a ``pkg.installed`` will have salt querying the package manager of the system. Compared to simple diff checks, querying the package manager is a lengthy process. This feature is an attempt to reduce the run time of states. If only a config change is being made but you wish to keep all of the self-resolving requisites this bypasses the lengthy cost of the package manager. The assumption is that if this file is present, the package should already be installed. :param str bypass_file_contains: This option can only be used in conjunction with the ``bypass_file`` option. It is to provide a second layer of validation before bypassing the ``pkg.installed`` process. .. code-block:: yaml install_ntp: pkg.installed: - name: ntp - bypass_file: /etc/ntp.conf - bypass_file_contains: version-20181218 This will have Salt check to see if the file contains the specified string. If the value is found, the ``pkg.installed`` process will be bypassed under the assumption that two pieces of validation have passed and the package is already installed. .. warning:: Do not try and use ``{{ salt['pkg.version']('ntp') }}`` in a jinja template as part of your bypass_file_contains match. This will trigger a ``pkg.version`` lookup with the package manager and negate any time saved by trying to use the bypass feature.
:return: A dictionary containing the state of the software installation :rtype dict: .. note:: The ``pkg.installed`` state supports the usage of ``reload_modules``. This functionality allows you to force Salt to reload all modules. In many cases, Salt is clever enough to transparently reload the modules. For example, if you install a package, Salt reloads modules because some other module or state might require the package which was installed. However, there are some edge cases where this may not be the case, which is what ``reload_modules`` is meant to resolve. You should only use ``reload_modules`` if your ``pkg.installed`` does some sort of installation where if you do not reload the modules future items in your state which rely on the software being installed will fail. Please see the :ref:`Reloading Modules <reloading-modules>` documentation for more information. ''' if not pkgs and isinstance(pkgs, list): return {'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided'} # If just a name (and optionally a version) is passed, just pack them into # the pkgs argument. 
if name and not any((pkgs, sources)): if version: pkgs = [{name: version}] version = None else: pkgs = [name] kwargs['saltenv'] = __env__ refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if bypass_file is not None and bypass_file_contains is not None: if os.path.isfile(bypass_file): with salt.utils.fopen(bypass_file) as bypass_file_open: if bypass_file_contains in bypass_file_open.read(): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as {} was present in {}'.format(bypass_file_contains, bypass_file)} if bypass_file is not None and bypass_file_contains is None: if os.path.isfile(bypass_file): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as bypass_file {} was present'.format(bypass_file)} # check if capabilities should be checked and modify the requested packages # accordingly. if pkgs: pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) if not isinstance(pkg_verify, list): pkg_verify = pkg_verify is True if (pkg_verify or isinstance(pkg_verify, list)) \ and 'pkg.verify' not in __salt__: return {'name': name, 'changes': {}, 'result': False, 'comment': 'pkg.verify not implemented'} if not isinstance(version, six.string_types) and version is not None: version = six.text_type(version) kwargs['allow_updates'] = allow_updates result = _find_install_targets(name, version, pkgs, sources, fromrepo=fromrepo, skip_suggestions=skip_suggestions, pkg_verify=pkg_verify, normalize=normalize, ignore_epoch=ignore_epoch, reinstall=reinstall, refresh=refresh, **kwargs) try: (desired, targets, to_unpurge, to_reinstall, altered_files, warnings, was_refreshed) = result if was_refreshed: refresh = False except ValueError: # _find_install_targets() found no targets or encountered an error # check that the hold function is available if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( 
name=name, pkgs=pkgs, sources=sources ) except (CommandExecutionError, SaltInvocationError) as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': six.text_type(exc)} if 'result' in hold_ret and not hold_ret['result']: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] for i in modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] result['changes'][i['name']] = i['changes'] for i in not_modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] for i in failed_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] return result if to_unpurge and 'lowpkg.unpurge' not in __salt__: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'lowpkg.unpurge not implemented'} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret # Remove any targets not returned by _find_install_targets if pkgs: pkgs = [dict([(x, y)]) for x, y in six.iteritems(targets)] pkgs.extend([dict([(x, y)]) for x, y in six.iteritems(to_reinstall)]) elif sources: oldsources = sources sources = [x for x in oldsources if next(iter(list(x.keys()))) in targets] sources.extend([x for x in oldsources if next(iter(list(x.keys()))) in to_reinstall]) comment = [] if __opts__['test']: if targets: if sources: summary = ', '.join(targets) else: summary = ', '.join([_get_desired_pkg(x, targets) for x in targets]) comment.append('The following packages would be ' 'installed/updated: {0}'.format(summary)) if to_unpurge: comment.append( 'The following packages would have their selection 
status ' 'changed from \'purge\' to \'install\': {0}' .format(', '.join(to_unpurge)) ) if to_reinstall: # Add a comment for each package in to_reinstall with its # pkg.verify output if reinstall: reinstall_targets = [] for reinstall_pkg in to_reinstall: if sources: reinstall_targets.append(reinstall_pkg) else: reinstall_targets.append( _get_desired_pkg(reinstall_pkg, to_reinstall) ) msg = 'The following packages would be reinstalled: ' msg += ', '.join(reinstall_targets) comment.append(msg) else: for reinstall_pkg in to_reinstall: if sources: pkgstr = reinstall_pkg else: pkgstr = _get_desired_pkg(reinstall_pkg, to_reinstall) comment.append( 'Package \'{0}\' would be reinstalled because the ' 'following files have been altered:'.format(pkgstr) ) comment.append( _nested_output(altered_files[reinstall_pkg]) ) ret = {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret changes = {'installed': {}} modified_hold = None not_modified_hold = None failed_hold = None if targets or to_reinstall: try: pkg_ret = __salt__['pkg.install'](name=None, refresh=refresh, version=version, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=pkgs, sources=sources, reinstall=bool(to_reinstall), normalize=normalize, update_holds=update_holds, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while installing ' 'package(s): {0}'.format(exc)) if warnings: ret.setdefault('warnings', []).extend(warnings) return ret if refresh: refresh = False if isinstance(pkg_ret, dict): changes['installed'].update(pkg_ret) elif isinstance(pkg_ret, six.string_types): comment.append(pkg_ret) # Code below will be looking for a dictionary. If this is a string # it means that there was an exception raised and that no packages # changed, so now that we have added this error to the comments we # set this to an empty dictionary so that the code below which # checks reinstall targets works. pkg_ret = {} if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( name=name, pkgs=desired ) except (CommandExecutionError, SaltInvocationError) as exc: comment.append(six.text_type(exc)) ret = {'name': name, 'changes': changes, 'result': False, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: if 'result' in hold_ret and not hold_ret['result']: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] if to_unpurge: changes['purge_desired'] = __salt__['lowpkg.unpurge'](*to_unpurge) # Analyze pkg.install results for packages in targets if sources: modified = [x for x in changes['installed'] if x in targets] not_modified = [x for x in desired if x not in targets 
and x not in to_reinstall] failed = [x for x in targets if x not in modified] else: if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True new_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__: new_caps = __salt__['pkg.list_provides'](**kwargs) else: new_caps = {} ok, failed = _verify_install(desired, new_pkgs, ignore_epoch=ignore_epoch, new_caps=new_caps) modified = [x for x in ok if x in targets] not_modified = [x for x in ok if x not in targets and x not in to_reinstall] failed = [x for x in failed if x in targets] # If there was nothing unpurged, just set the changes dict to the contents # of changes['installed']. if not changes.get('purge_desired'): changes = changes['installed'] if modified: if sources: summary = ', '.join(modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in modified]) if len(summary) < 20: comment.append('The following packages were installed/updated: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} installed/updated.'.format( len(modified), 's' if len(modified) > 1 else '', 'were' if len(modified) > 1 else 'was' ) ) if modified_hold: for i in modified_hold: change_name = i['name'] if change_name in changes: comment.append(i['comment']) if changes[change_name]['new']: changes[change_name]['new'] += '\n' changes[change_name]['new'] += '{0}'.format(i['changes']['new']) if changes[change_name]['old']: changes[change_name]['old'] += '\n' changes[change_name]['old'] += '{0}'.format(i['changes']['old']) else: comment.append(i['comment']) changes[change_name] = {} changes[change_name]['new'] = '{0}'.format(i['changes']['new']) # Any requested packages that were not targeted for install or reinstall if not_modified: if sources: summary = ', '.join(not_modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in not_modified]) if len(not_modified) <= 20: comment.append('The following 
packages were already installed: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} already installed'.format( len(not_modified), 's' if len(not_modified) > 1 else '', 'were' if len(not_modified) > 1 else 'was' ) ) if not_modified_hold: for i in not_modified_hold: comment.append(i['comment']) result = True if failed: if sources: summary = ', '.join(failed) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in failed]) comment.insert(0, 'The following packages failed to ' 'install/update: {0}'.format(summary)) result = False if failed_hold: for i in failed_hold: comment.append(i['comment']) result = False # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] # Rerun pkg.verify for packages in to_reinstall to determine failed modified = [] failed = [] for reinstall_pkg in to_reinstall: if reinstall: if reinstall_pkg in pkg_ret: modified.append(reinstall_pkg) else: failed.append(reinstall_pkg) elif pkg_verify: # No need to wrap this in a try/except because we would already # have caught invalid arguments earlier. 
verify_result = __salt__['pkg.verify'](reinstall_pkg, ignore_types=ignore_types, verify_options=verify_options, **kwargs) if verify_result: failed.append(reinstall_pkg) altered_files[reinstall_pkg] = verify_result else: modified.append(reinstall_pkg) if modified: # Add a comment for each package in modified with its pkg.verify output for modified_pkg in modified: if sources: pkgstr = modified_pkg else: pkgstr = _get_desired_pkg(modified_pkg, desired) msg = 'Package {0} was reinstalled.'.format(pkgstr) if modified_pkg in altered_files: msg += ' The following files were remediated:' comment.append(msg) comment.append(_nested_output(altered_files[modified_pkg])) else: comment.append(msg) if failed: # Add a comment for each package in failed with its pkg.verify output for failed_pkg in failed: if sources: pkgstr = failed_pkg else: pkgstr = _get_desired_pkg(failed_pkg, desired) msg = ('Reinstall was not successful for package {0}.' .format(pkgstr)) if failed_pkg in altered_files: msg += ' The following files could not be remediated:' comment.append(msg) comment.append(_nested_output(altered_files[failed_pkg])) else: comment.append(msg) result = False ret = {'name': name, 'changes': changes, 'result': result, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret def downloaded(name, version=None, pkgs=None, fromrepo=None, ignore_epoch=None, **kwargs): ''' .. versionadded:: 2017.7.0 Ensure that the package is downloaded, and that it is the correct version (if specified). Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` :param str name: The name of the package to be downloaded. This parameter is ignored if either "pkgs" is used. Additionally, please note that this option can only be used to download packages from a software repository. :param str version: Download a specific version of a package. .. 
important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.downloaded: - version: 2:7.4.160-1.el7 An **ignore_epoch** argument has been added to which causes the epoch to be disregarded when the state checks to see if the desired version was installed. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.downloaded: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: yaml zsh: pkg.downloaded: - version: 5.0.5-4.63 - fromrepo: "myrepository" ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} if 'pkg.list_downloaded' not in __salt__: ret['result'] = False ret['comment'] = 'The pkg.downloaded state is not available on ' \ 'this platform' return ret if not pkgs and isinstance(pkgs, list): ret['result'] = True ret['comment'] = 'No packages to download provided' return ret # If just a name (and optionally a version) is passed, just pack them into # the pkgs argument. if name and not pkgs: if version: pkgs = [{name: version}] version = None else: pkgs = [name] # It doesn't make sense here to received 'downloadonly' as kwargs # as we're explicitly passing 'downloadonly=True' to execution module. 
if 'downloadonly' in kwargs: del kwargs['downloadonly'] pkgs, _refresh = _resolve_capabilities(pkgs, **kwargs) # Only downloading not yet downloaded packages targets = _find_download_targets(name, version, pkgs, fromrepo=fromrepo, ignore_epoch=ignore_epoch, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, dict): ret['result'] = False ret['comment'] = 'An error was encountered while checking targets: ' \ '{0}'.format(targets) return ret if __opts__['test']: summary = ', '.join(targets) ret['comment'] = 'The following packages would be ' \ 'downloaded: {0}'.format(summary) return ret try: pkg_ret = __salt__['pkg.install'](name=name, pkgs=pkgs, version=version, downloadonly=True, fromrepo=fromrepo, ignore_epoch=ignore_epoch, **kwargs) ret['result'] = True ret['changes'].update(pkg_ret) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = 'An error was encountered while downloading ' \ 'package(s): {0}'.format(exc) return ret new_pkgs = __salt__['pkg.list_downloaded'](**kwargs) ok, failed = _verify_install(targets, new_pkgs, ignore_epoch=ignore_epoch) if failed: summary = ', '.join([_get_desired_pkg(x, targets) for x in failed]) ret['result'] = False ret['comment'] = 'The following packages failed to ' \ 'download: {0}'.format(summary) if not ret['changes'] and not ret['comment']: ret['result'] = True ret['comment'] = 'Packages are already downloaded: ' \ '{0}'.format(', '.join(targets)) return ret def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs): ''' .. versionadded:: 2017.7.0 Ensure that packages related to certain advisory ids are installed. 
Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` CLI Example: .. code-block:: yaml issue-foo-fixed: pkg.patch_installed: - advisory_ids: - SUSE-SLE-SERVER-12-SP2-2017-185 - SUSE-SLE-SERVER-12-SP2-2017-150 - SUSE-SLE-SERVER-12-SP2-2017-120 ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} if 'pkg.list_patches' not in __salt__: ret['result'] = False ret['comment'] = 'The pkg.patch_installed state is not available on ' \ 'this platform' return ret if not advisory_ids and isinstance(advisory_ids, list): ret['result'] = True ret['comment'] = 'No advisory ids provided' return ret # Only downloading not yet downloaded packages targets = _find_advisory_targets(name, advisory_ids, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, list): ret['result'] = False ret['comment'] = 'An error was encountered while checking targets: ' \ '{0}'.format(targets) return ret if __opts__['test']: summary = ', '.join(targets) ret['comment'] = 'The following advisory patches would be ' \ 'downloaded: {0}'.format(summary) return ret try: pkg_ret = __salt__['pkg.install'](name=name, advisory_ids=advisory_ids, downloadonly=downloadonly, **kwargs) ret['result'] = True ret['changes'].update(pkg_ret) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = ('An error was encountered while downloading '
                              'package(s): {0}'.format(exc))
        return ret

    if not ret['changes'] and not ret['comment']:
        # Nothing changed and no error comment was set: the advisories are
        # either not applicable or their packages are already present.
        status = 'downloaded' if downloadonly else 'installed'
        ret['result'] = True
        ret['comment'] = ('Advisory patch is not needed or related packages '
                          'are already {0}'.format(status))

    return ret


def patch_downloaded(name, advisory_ids=None, **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that packages related to certain advisory ids are downloaded.

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    CLI Example:

    .. code-block:: yaml

        preparing-to-fix-issues:
          pkg.patch_downloaded:
            - advisory_ids:
              - SUSE-SLE-SERVER-12-SP2-2017-185
              - SUSE-SLE-SERVER-12-SP2-2017-150
              - SUSE-SLE-SERVER-12-SP2-2017-120
    '''
    if 'pkg.list_patches' not in __salt__:
        return {'name': name,
                'result': False,
                'changes': {},
                'comment': 'The pkg.patch_downloaded state is not available on '
                           'this platform'}

    # It doesn't make sense here to receive 'downloadonly' as kwargs
    # as we're explicitly passing 'downloadonly=True' to execution module.
    if 'downloadonly' in kwargs:
        del kwargs['downloadonly']
    # Delegate to patch_installed; downloadonly=True restricts it to only
    # fetching the packages related to the advisory ids.
    return patch_installed(name=name, advisory_ids=advisory_ids, downloadonly=True, **kwargs)


def latest(
        name,
        refresh=None,
        fromrepo=None,
        skip_verify=False,
        pkgs=None,
        watch_flags=True,
        **kwargs):
    '''
    Ensure that the named package is installed and the latest available
    package. If the package can be updated, this state function will update
    the package. Generally it is better for the
    :mod:`installed <salt.states.pkg.installed>` function to be used, as
    :mod:`latest <salt.states.pkg.latest>` will update the package whenever
    a new package is available.

    name
        The name of the package to maintain at the latest available version.
        This parameter is ignored if "pkgs" is used.
fromrepo Specify a repository from which to install skip_verify Skip the GPG verification check for the package to be installed refresh This parameter controls whether or not the package repo database is updated prior to checking for the latest available version of the requested packages. If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before checking for the latest available version of the requested packages. If ``False``, the package database will *not* be refreshed before checking. If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.latest: - refresh: True - cache_valid_time: 300 In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 Multiple Package Installation Options: (Not yet supported for: FreeBSD, OpenBSD, MacOS, and Solaris pkgutil) pkgs A list of packages to maintain at the latest available version. .. 
code-block:: yaml mypkgs: pkg.latest: - pkgs: - foo - bar - baz install_recommends Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - install_recommends: False only_upgrade Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. report_reboot_exit_codes If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. code-block:: yaml ms vcpp installed: pkg.latest: - name: ms-vcpp - report_reboot_exit_codes: False ''' refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if kwargs.get('sources'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'The "sources" parameter is not supported.'} elif pkgs: desired_pkgs = list(_repack_pkgs(pkgs).keys()) if not desired_pkgs: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted "pkgs" parameter. See ' 'minion log.'} else: if not pkgs and isinstance(pkgs, list): return { 'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided' } else: desired_pkgs = [name] kwargs['saltenv'] = __env__ # check if capabilities should be checked and modify the requested packages # accordingly. 
desired_pkgs, refresh = _resolve_capabilities(desired_pkgs, refresh=refresh, **kwargs) try: avail = __salt__['pkg.latest_version'](*desired_pkgs, fromrepo=fromrepo, refresh=refresh, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking the ' 'newest available version of package(s): {0}' .format(exc)} try: cur = __salt__['pkg.version'](*desired_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': exc.strerror} # Repack the cur/avail data if only a single package is being checked if isinstance(cur, six.string_types): cur = {desired_pkgs[0]: cur} if isinstance(avail, six.string_types): avail = {desired_pkgs[0]: avail} targets = {} problems = [] for pkg in desired_pkgs: if not avail.get(pkg): # Package either a) is up-to-date, or b) does not exist if not cur.get(pkg): # Package does not exist msg = 'No information found for \'{0}\'.'.format(pkg) log.error(msg) problems.append(msg) elif watch_flags \ and __grains__.get('os_family') == 'Gentoo' \ and __salt__['portage_config.is_changed_uses'](pkg): # Package is up-to-date, but Gentoo USE flags are changing so # we need to add it to the targets targets[pkg] = cur[pkg] else: # Package either a) is not installed, or b) is installed and has an # upgrade available targets[pkg] = avail[pkg] if problems: return { 'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems) } if targets: # Find up-to-date packages if not pkgs: # There couldn't have been any up-to-date packages if this state # only targeted a single package and is being allowed to proceed to # the install step. 
up_to_date = [] else: up_to_date = [x for x in pkgs if x not in targets] if __opts__['test']: comments = [] comments.append( 'The following packages would be installed/upgraded: ' + ', '.join(sorted(targets)) ) if up_to_date: up_to_date_count = len(up_to_date) if up_to_date_count <= 10: comments.append( 'The following packages are already up-to-date: ' + ', '.join( ['{0} ({1})'.format(x, cur[x]) for x in sorted(up_to_date)] ) ) else: comments.append( '{0} packages are already up-to-date' .format(up_to_date_count) ) return {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comments)} if salt.utils.platform.is_windows(): # pkg.install execution module on windows ensures the software # package is installed when no version is specified, it does not # upgrade the software to the latest. This is per the design. # Build updated list of pkgs *with verion number*, exclude # non-targeted ones targeted_pkgs = [{x: targets[x]} for x in targets] else: # Build updated list of pkgs to exclude non-targeted ones targeted_pkgs = list(targets) # No need to refresh, if a refresh was necessary it would have been # performed above when pkg.latest_version was run. 
try: changes = __salt__['pkg.install'](name=None, refresh=False, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=targeted_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while installing ' 'package(s): {0}'.format(exc)} if changes: # Find failed and successful updates failed = [x for x in targets if not changes.get(x) or changes[x].get('new') != targets[x] and targets[x] != 'latest'] successful = [x for x in targets if x not in failed] comments = [] if failed: msg = 'The following packages failed to update: ' \ '{0}'.format(', '.join(sorted(failed))) comments.append(msg) if successful: msg = 'The following packages were successfully ' \ 'installed/upgraded: ' \ '{0}'.format(', '.join(sorted(successful))) comments.append(msg) if up_to_date: if len(up_to_date) <= 10: msg = 'The following packages were already up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: msg = '{0} packages were already up-to-date '.format( len(up_to_date)) comments.append(msg) return {'name': name, 'changes': changes, 'result': False if failed else True, 'comment': ' '.join(comments)} else: if len(targets) > 10: comment = ('{0} targeted packages failed to update. ' 'See debug log for details.'.format(len(targets))) elif len(targets) > 1: comment = ('The following targeted packages failed to update. ' 'See debug log for details: ({0}).' 
                           .format(', '.join(sorted(targets))))
            else:
                comment = 'Package {0} failed to ' \
                          'update.'.format(next(iter(list(targets.keys()))))
            if up_to_date:
                if len(up_to_date) <= 10:
                    comment += ' The following packages were already ' \
                               'up-to-date: ' \
                               '{0}'.format(', '.join(sorted(up_to_date)))
                else:
                    comment += '{0} packages were already ' \
                               'up-to-date'.format(len(up_to_date))
            return {'name': name,
                    'changes': changes,
                    'result': False,
                    'comment': comment}
    else:
        # No targets at all: every requested package is already at the
        # latest available version.
        if len(desired_pkgs) > 10:
            comment = 'All {0} packages are up-to-date.'.format(
                len(desired_pkgs))
        elif len(desired_pkgs) > 1:
            comment = 'All packages are up-to-date ' \
                      '({0}).'.format(', '.join(sorted(desired_pkgs)))
        else:
            comment = 'Package {0} is already ' \
                      'up-to-date'.format(desired_pkgs[0])

        return {'name': name,
                'changes': {},
                'result': True,
                'comment': comment}


def _uninstall(
        action='remove',
        name=None,
        version=None,
        pkgs=None,
        normalize=True,
        ignore_epoch=False,
        **kwargs):
    '''
    Common function for package removal
    '''
    # Only 'remove' and 'purge' map onto pkg execution-module functions;
    # anything else indicates a caller bug, not user error.
    if action not in ('remove', 'purge'):
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'Invalid action \'{0}\'. '
                           'This is probably a bug.'.format(action)}
    try:
        pkg_params = __salt__['pkg_resource.parse_targets'](
            name, pkgs, normalize=normalize)[0]
    except MinionError as exc:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'An error was encountered while parsing targets: '
                           '{0}'.format(exc)}
    targets = _find_remove_targets(name, version, pkgs, normalize,
                                   ignore_epoch=ignore_epoch, **kwargs)
    if isinstance(targets, dict) and 'result' in targets:
        # _find_remove_targets returned a complete state result; pass it on.
        return targets
    elif not isinstance(targets, list):
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'An error was encountered while checking targets: '
                           '{0}'.format(targets)}
    if action == 'purge':
        # Also purge packages that were removed but still have config files
        # left behind (i.e. partially installed).
        old_removed = __salt__['pkg.list_pkgs'](versions_as_list=True,
                                                removed=True,
                                                **kwargs)
        targets.extend([x for x in pkg_params if x in old_removed])
    targets.sort()

    if not targets:
        return {'name': name,
                'changes': {},
                'result': True,
                'comment': 'None of the targeted packages are installed'
                           '{0}'.format(' or partially installed'
                                        if action == 'purge' else '')}

    if __opts__['test']:
        return {'name': name,
                'changes': {},
                'result': None,
                'comment': 'The following packages will be {0}d: '
                           '{1}.'.format(action, ', '.join(targets))}

    changes = __salt__['pkg.{0}'.format(action)](name, pkgs=pkgs, version=version, **kwargs)
    new = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
    failed = []
    for x in pkg_params:
        if __grains__['os_family'] in ['Suse', 'RedHat']:
            # Check if the package version set to be removed is actually removed:
            if x in new and not pkg_params[x]:
                failed.append(x)
            elif x in new and pkg_params[x] in new[x]:
                failed.append(x + "-" + pkg_params[x])
        elif x in new:
            failed.append(x)

    if action == 'purge':
        # A package is only fully purged when it no longer shows up in the
        # 'removed' (config-files-remaining) list either.
        new_removed = __salt__['pkg.list_pkgs'](versions_as_list=True,
                                                removed=True,
                                                **kwargs)
        failed.extend([x for x in pkg_params if x in new_removed])
    failed.sort()

    if failed:
        return {'name': name,
                'changes': changes,
                'result': False,
                'comment': 'The following packages failed to {0}: '
                           '{1}.'.format(action, ', '.join(failed))}

    comments = []
    not_installed = sorted([x for x in pkg_params if x not in targets])
    if not_installed:
        comments.append('The following packages were not installed: '
                        '{0}'.format(', '.join(not_installed)))
        comments.append('The following packages were {0}d: '
                        '{1}.'.format(action, ', '.join(targets)))
    else:
        comments.append('All targeted packages were {0}d.'.format(action))

    return {'name': name,
            'changes': changes,
            'result': True,
            'comment': ' '.join(comments)}


def removed(name,
            version=None,
            pkgs=None,
            normalize=True,
            ignore_epoch=False,
            **kwargs):
    '''
    Verify that a package is not installed, calling ``pkg.remove`` if
    necessary to remove the package.

    name
        The name of the package to be removed.

    version
        The version of the package that should be removed. Don't do anything
        if the package is installed with an unmatching version.

        .. important::
            As of version 2015.8.7, for distros which use yum/dnf, packages
            which have a version with a nonzero epoch (that is, versions
            which start with a number followed by a colon like in the
            example above) must have the epoch included when specifying the
            version number. For example:

            .. code-block:: yaml

                vim-enhanced:
                  pkg.removed:
                    - version: 2:7.4.160-1.el7

            In version 2015.8.9, an **ignore_epoch** argument has been added
            to :py:mod:`pkg.installed <salt.states.pkg.installed>`,
            :py:mod:`pkg.removed <salt.states.pkg.removed>`, and
            :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which
            causes the epoch to be disregarded when the state checks to see
            if the desired version was installed. If **ignore_epoch** was
            not set to ``True``, and instead of ``2:7.4.160-1.el7`` a
            version of ``7.4.160-1.el7`` were used, this state would report
            success since the actual installed version includes the epoch,
            and the specified version would not match.

    normalize : True
        Normalize the package name by removing the architecture, if the
        architecture of the package is different from the architecture of
        the operating system.
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.removed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to remove. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='remove', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while removing ' 'package(s): {0}'.format(exc)) return ret def purged(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.purge`` if necessary to purge the package. 
All configuration files are also removed. name The name of the package to be purged. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. 
code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.purged: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to purge. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='purge', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while purging ' 'package(s): {0}'.format(exc)) return ret def uptodate(name, refresh=False, pkgs=None, **kwargs): ''' .. versionadded:: 2014.7.0 .. versionchanged:: 2018.3.0 Added support for the ``pkgin`` provider. Verify that the system is completely up to date. name The name has no functional value and is only used as a tracking reference refresh refresh the package database before checking for new upgrades pkgs list of packages to upgrade :param str cache_valid_time: This parameter sets the value in seconds after which cache marked as invalid, and cache update is necessary. This overwrite ``refresh`` parameter default behavior. In this case cache_valid_time is set, refresh will not take place for amount in seconds since last ``apt-get update`` executed on the system. .. 
note:: This parameter available only on Debian based distributions, and have no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 kwargs Any keyword arguments to pass through to ``pkg.upgrade``. .. versionadded:: 2015.5.0 ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'Failed to update'} if 'pkg.list_upgrades' not in __salt__: ret['comment'] = 'State pkg.uptodate is not available' return ret # emerge --update doesn't appear to support repo notation if 'fromrepo' in kwargs and __grains__['os_family'] == 'Gentoo': ret['comment'] = '\'fromrepo\' argument not supported on this platform' return ret if isinstance(refresh, bool): pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) try: packages = __salt__['pkg.list_upgrades'](refresh=refresh, **kwargs) expected = {pkgname: {'new': pkgver, 'old': __salt__['pkg.version'](pkgname, **kwargs)} for pkgname, pkgver in six.iteritems(packages)} if isinstance(pkgs, list): packages = [pkg for pkg in packages if pkg in pkgs] expected = {pkgname: pkgver for pkgname, pkgver in six.iteritems(expected) if pkgname in pkgs} except Exception as exc: ret['comment'] = six.text_type(exc) return ret else: ret['comment'] = 'refresh must be either True or False' return ret if not packages: ret['comment'] = 'System is already up-to-date' ret['result'] = True return ret elif __opts__['test']: ret['comment'] = 'System update will be performed' ret['changes'] = expected ret['result'] = None return ret try: ret['changes'] = __salt__['pkg.upgrade'](refresh=refresh, pkgs=pkgs, **kwargs) except CommandExecutionError as exc: if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while updating ' 'packages: {0}'.format(exc)) return ret # If a package list was provided, ensure those packages were updated missing = [] if isinstance(pkgs, list): missing = [pkg for pkg in six.iterkeys(expected) if pkg not in ret['changes']] if missing: ret['comment'] = 'The following package(s) failed to update: {0}'.format(', '.join(missing)) ret['result'] = False else: ret['comment'] = 'Upgrade ran successfully' ret['result'] = True return ret def group_installed(name, skip=None, include=None, **kwargs): ''' .. versionadded:: 2015.8.0 .. versionchanged:: 2016.11.0 Added support in :mod:`pacman <salt.modules.pacman>` Ensure that an entire package group is installed. This state is currently only supported for the :mod:`yum <salt.modules.yumpkg>` and :mod:`pacman <salt.modules.pacman>` package managers. skip Packages that would normally be installed by the package group ("default" packages), which should not be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - skip: - piranha include Packages which are included in a group, which would not normally be installed by a ``yum groupinstall`` ("optional" packages). Note that this will not enforce group membership; if you include packages which are not members of the specified groups, they will still be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - include: - haproxy .. versionchanged:: 2016.3.0 This option can no longer be passed as a comma-separated list, it must now be passed as a list (as shown in the above example). .. note:: Because this is essentially a wrapper around :py:func:`pkg.install <salt.modules.yumpkg.install>`, any argument which can be passed to pkg.install may also be included here, and it will be passed on to the call to :py:func:`pkg.install <salt.modules.yumpkg.install>`. 
''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} if 'pkg.group_diff' not in __salt__: ret['comment'] = 'pkg.group_install not available for this platform' return ret if skip is None: skip = [] else: if not isinstance(skip, list): ret['comment'] = 'skip must be formatted as a list' return ret for idx, item in enumerate(skip): if not isinstance(item, six.string_types): skip[idx] = six.text_type(item) if include is None: include = [] else: if not isinstance(include, list): ret['comment'] = 'include must be formatted as a list' return ret for idx, item in enumerate(include): if not isinstance(item, six.string_types): include[idx] = six.text_type(item) try: diff = __salt__['pkg.group_diff'](name) except CommandExecutionError as err: ret['comment'] = ('An error was encountered while installing/updating ' 'group \'{0}\': {1}.'.format(name, err)) return ret mandatory = diff['mandatory']['installed'] + \ diff['mandatory']['not installed'] invalid_skip = [x for x in mandatory if x in skip] if invalid_skip: ret['comment'] = ( 'The following mandatory packages cannot be skipped: {0}' .format(', '.join(invalid_skip)) ) return ret targets = diff['mandatory']['not installed'] targets.extend([x for x in diff['default']['not installed'] if x not in skip]) targets.extend(include) if not targets: ret['result'] = True ret['comment'] = 'Group \'{0}\' is already installed'.format(name) return ret partially_installed = diff['mandatory']['installed'] \ or diff['default']['installed'] \ or diff['optional']['installed'] if __opts__['test']: ret['result'] = None if partially_installed: ret['comment'] = ( 'Group \'{0}\' is partially installed and will be updated' .format(name) ) else: ret['comment'] = 'Group \'{0}\' will be installed'.format(name) return ret try: ret['changes'] = __salt__['pkg.install'](pkgs=targets, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while ' 'installing/updating group \'{0}\': {1}' .format(name, exc)) return ret failed = [x for x in targets if x not in __salt__['pkg.list_pkgs'](**kwargs)] if failed: ret['comment'] = ( 'Failed to install the following packages: {0}' .format(', '.join(failed)) ) return ret ret['result'] = True ret['comment'] = 'Group \'{0}\' was {1}'.format( name, 'updated' if partially_installed else 'installed' ) return ret def mod_init(low): ''' Set a flag to tell the install functions to refresh the package database. This ensures that the package database is refreshed only once during a state run significantly improving the speed of package management during a state run. It sets a flag for a number of reasons, primarily due to timeline logic. When originally setting up the mod_init for pkg a number of corner cases arose with different package managers and how they refresh package data. It also runs the "ex_mod_init" from the package manager module that is currently loaded. The "ex_mod_init" is expected to work as a normal "mod_init" function. .. 
seealso:: :py:func:`salt.modules.ebuild.ex_mod_init` ''' ret = True if 'pkg.ex_mod_init' in __salt__: ret = __salt__['pkg.ex_mod_init'](low) if low['fun'] == 'installed' or low['fun'] == 'latest': salt.utils.pkg.write_rtag(__opts__) return ret return False def mod_aggregate(low, chunks, running): ''' The mod_aggregate function which looks up all packages in the available low chunks and merges them into a single pkgs ref in the present low data ''' pkgs = [] pkg_type = None agg_enabled = [ 'installed', 'latest', 'removed', 'purged', ] if low.get('fun') not in agg_enabled: return low for chunk in chunks: tag = __utils__['state.gen_tag'](chunk) if tag in running: # Already ran the pkg state, skip aggregation continue if chunk.get('state') == 'pkg': if '__agg__' in chunk: continue # Check for the same function if chunk.get('fun') != low.get('fun'): continue # Check for the same repo if chunk.get('fromrepo') != low.get('fromrepo'): continue # Check first if 'sources' was passed so we don't aggregate pkgs # and sources together. if 'sources' in chunk: if pkg_type is None: pkg_type = 'sources' if pkg_type == 'sources': pkgs.extend(chunk['sources']) chunk['__agg__'] = True else: if pkg_type is None: pkg_type = 'pkgs' if pkg_type == 'pkgs': # Pull out the pkg names! if 'pkgs' in chunk: pkgs.extend(chunk['pkgs']) chunk['__agg__'] = True elif 'name' in chunk: version = chunk.pop('version', None) if version is not None: pkgs.append({chunk['name']: version}) else: pkgs.append(chunk['name']) chunk['__agg__'] = True if pkg_type is not None and pkgs: if pkg_type in low: low[pkg_type].extend(pkgs) else: low[pkg_type] = pkgs return low def mod_watch(name, **kwargs): ''' Install/reinstall a package based on a watch requisite .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered. 
''' sfun = kwargs.pop('sfun', None) mapfun = {'purged': purged, 'latest': latest, 'removed': removed, 'installed': installed} if sfun in mapfun: return mapfun[sfun](name, **kwargs) return {'name': name, 'changes': {}, 'comment': 'pkg.{0} does not work with the watch requisite'.format(sfun), 'result': False}
saltstack/salt
salt/states/pkg.py
_parse_version_string
python
def _parse_version_string(version_conditions_string):
    '''
    Parse a comma-separated string of version conditions (e.g.
    ``'>=1.2.3-4, <2.3.4-5'``) and return a list of two-tuples
    containing (operator, version).
    '''
    conditions = version_conditions_string.strip()
    if not conditions:
        return []
    # Each comma-separated condition is resolved into an (operator, version)
    # pair; a missing operator is normalized to '==' by _get_comparison_spec.
    return [_get_comparison_spec(condition)
            for condition in conditions.split(',')]
Returns a list of two-tuples containing (operator, version).
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pkg.py#L165-L176
[ "def _get_comparison_spec(pkgver):\n '''\n Return a tuple containing the comparison operator and the version. If no\n comparison operator was passed, the comparison is assumed to be an \"equals\"\n comparison, and \"==\" will be the operator returned.\n '''\n oper, verstr = salt.utils.pkg.split_comparison(pkgver.strip())\n if oper in ('=', ''):\n oper = '=='\n return oper, verstr\n" ]
# -*- coding: utf-8 -*- ''' Installation of packages using OS package managers such as yum or apt-get ========================================================================= .. note:: On minions running systemd>=205, as of version 2015.8.12, 2016.3.3, and 2016.11.0, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing the package manager commands spawned by Salt, when Salt updates itself (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.use_scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Salt can manage software packages via the pkg state module, packages can be set up to be installed, latest, removed and purged. Package management declarations are typically rather simple: .. code-block:: yaml vim: pkg.installed A more involved example involves pulling from a custom repository. .. code-block:: yaml base: pkgrepo.managed: - humanname: Logstash PPA - name: ppa:wolfnet/logstash - dist: precise - file: /etc/apt/sources.list.d/logstash.list - keyid: 28B04E4A - keyserver: keyserver.ubuntu.com logstash: pkg.installed: - fromrepo: ppa:wolfnet/logstash Multiple packages can also be installed with the use of the pkgs state module .. code-block:: yaml dotdeb.repo: pkgrepo.managed: - humanname: Dotdeb - name: deb http://packages.dotdeb.org wheezy-php55 all - dist: wheezy-php55 - file: /etc/apt/sources.list.d/dotbeb.list - keyid: 89DF5277 - keyserver: keys.gnupg.net - refresh_db: true php.packages: pkg.installed: - fromrepo: wheezy-php55 - pkgs: - php5-fpm - php5-cli - php5-curl .. 
warning:: Package names are currently case-sensitive. If the minion is using a package manager which is not case-sensitive (such as :mod:`pkgng <salt.modules.pkgng>`), then this state will fail if the proper case is not used. This will be addressed in a future release of Salt. ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import fnmatch import logging import os import re # Import Salt libs import salt.utils.pkg import salt.utils.platform import salt.utils.versions from salt.output import nested from salt.utils.functools import namespaced_function as _namespaced_function from salt.utils.odict import OrderedDict as _OrderedDict from salt.exceptions import ( CommandExecutionError, MinionError, SaltInvocationError ) from salt.modules.pkg_resource import _repack_pkgs # Import 3rd-party libs from salt.ext import six # pylint: disable=invalid-name _repack_pkgs = _namespaced_function(_repack_pkgs, globals()) if salt.utils.platform.is_windows(): # pylint: disable=import-error,no-name-in-module,unused-import from salt.ext.six.moves.urllib.parse import urlparse as _urlparse from salt.exceptions import SaltRenderError import collections import datetime import errno import time from functools import cmp_to_key # pylint: disable=import-error # pylint: enable=unused-import from salt.modules.win_pkg import _get_package_info from salt.modules.win_pkg import get_repo_data from salt.modules.win_pkg import _get_repo_details from salt.modules.win_pkg import _refresh_db_conditional from salt.modules.win_pkg import refresh_db from salt.modules.win_pkg import genrepo from salt.modules.win_pkg import _repo_process_pkg_sls from salt.modules.win_pkg import _get_latest_pkg_version from salt.modules.win_pkg import _reverse_cmp_pkg_versions _get_package_info = _namespaced_function(_get_package_info, globals()) get_repo_data = _namespaced_function(get_repo_data, globals()) _get_repo_details = \ _namespaced_function(_get_repo_details, globals()) 
_refresh_db_conditional = \ _namespaced_function(_refresh_db_conditional, globals()) refresh_db = _namespaced_function(refresh_db, globals()) genrepo = _namespaced_function(genrepo, globals()) _repo_process_pkg_sls = \ _namespaced_function(_repo_process_pkg_sls, globals()) _get_latest_pkg_version = \ _namespaced_function(_get_latest_pkg_version, globals()) _reverse_cmp_pkg_versions = \ _namespaced_function(_reverse_cmp_pkg_versions, globals()) # The following imports are used by the namespaced win_pkg funcs # and need to be included in their globals. # pylint: disable=import-error,unused-import from salt.utils.versions import LooseVersion # pylint: enable=import-error,unused-import # pylint: enable=invalid-name log = logging.getLogger(__name__) def __virtual__(): ''' Only make these states available if a pkg provider has been detected or assigned for this minion ''' return 'pkg.install' in __salt__ def _get_comparison_spec(pkgver): ''' Return a tuple containing the comparison operator and the version. If no comparison operator was passed, the comparison is assumed to be an "equals" comparison, and "==" will be the operator returned. ''' oper, verstr = salt.utils.pkg.split_comparison(pkgver.strip()) if oper in ('=', ''): oper = '==' return oper, verstr def _fulfills_version_string(installed_versions, version_conditions_string, ignore_epoch=False, allow_updates=False): ''' Returns True if any of the installed versions match the specified version conditions, otherwise returns False. installed_versions The installed versions version_conditions_string The string containing all version conditions. E.G. 1.2.3-4 >=1.2.3-4 >=1.2.3-4, <2.3.4-5 >=1.2.3-4, <2.3.4-5, !=1.2.4-1 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. allow_updates : False Allow the package to be updated outside Salt's control (e.g. 
auto updates on Windows). This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. (Only applicable if only one strict version condition is specified E.G. version: 2.0.6~ubuntu3) ''' version_conditions = _parse_version_string(version_conditions_string) for installed_version in installed_versions: fullfills_all = True for operator, version_string in version_conditions: if allow_updates and len(version_conditions) == 1 and operator == '==': operator = '>=' fullfills_all = fullfills_all and _fulfills_version_spec([installed_version], operator, version_string, ignore_epoch=ignore_epoch) if fullfills_all: return True return False def _fulfills_version_spec(versions, oper, desired_version, ignore_epoch=False): ''' Returns True if any of the installed versions match the specified version, otherwise returns False ''' cmp_func = __salt__.get('pkg.version_cmp') # stripping "with_origin" dict wrapper if salt.utils.platform.is_freebsd(): if isinstance(versions, dict) and 'version' in versions: versions = versions['version'] for ver in versions: if (oper == '==' and fnmatch.fnmatch(ver, desired_version)) \ or salt.utils.versions.compare(ver1=ver, oper=oper, ver2=desired_version, cmp_func=cmp_func, ignore_epoch=ignore_epoch): return True return False def _find_unpurge_targets(desired, **kwargs): ''' Find packages which are marked to be purged but can't yet be removed because they are dependencies for other installed packages. These are the packages which will need to be 'unpurged' because they are part of pkg.installed states. This really just applies to Debian-based Linuxes. 
''' return [ x for x in desired if x in __salt__['pkg.list_pkgs'](purge_desired=True, **kwargs) ] def _find_download_targets(name=None, version=None, pkgs=None, normalize=True, skip_suggestions=False, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.downloaded and discover what packages need to be downloaded. Return a dict of packages to download. ''' cur_pkgs = __salt__['pkg.list_downloaded'](**kwargs) if pkgs: to_download = _repack_pkgs(pkgs, normalize=normalize) if not to_download: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. See ' 'minion log.'} else: if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_download = {_normalize_name(name): version} else: to_download = {name: version} cver = cur_pkgs.get(name, {}) if name in to_download: # Package already downloaded, no need to download again if cver and version in cver: return {'name': name, 'changes': {}, 'result': True, 'comment': 'Version {0} of package \'{1}\' is already ' 'downloaded'.format(version, name)} # if cver is not an empty string, the package is already downloaded elif cver and version is None: # The package is downloaded return {'name': name, 'changes': {}, 'result': True, 'comment': 'Package {0} is already ' 'downloaded'.format(name)} version_spec = False if not skip_suggestions: try: problems = _preflight_check(to_download, **kwargs) except CommandExecutionError: pass else: comments = [] if problems.get('no_suggest'): comments.append( 'The following package(s) were not found, and no ' 'possible matches were found in the package db: ' '{0}'.format( ', '.join(sorted(problems['no_suggest'])) ) ) if problems.get('suggest'): for pkgname, suggestions in \ six.iteritems(problems['suggest']): comments.append( 'Package \'{0}\' not found (possible matches: ' '{1})'.format(pkgname, ', '.join(suggestions)) ) if comments: if len(comments) > 1: comments.append('') return 
{'name': name, 'changes': {}, 'result': False, 'comment': '. '.join(comments).rstrip()} # Find out which packages will be targeted in the call to pkg.download # Check current downloaded versions against specified versions targets = {} problems = [] for pkgname, pkgver in six.iteritems(to_download): cver = cur_pkgs.get(pkgname, {}) # Package not yet downloaded, so add to targets if not cver: targets[pkgname] = pkgver continue # No version specified but package is already downloaded elif cver and not pkgver: continue version_spec = True try: if not _fulfills_version_string(cver.keys(), pkgver, ignore_epoch=ignore_epoch): targets[pkgname] = pkgver except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already downloaded msg = ( 'All specified packages{0} are already downloaded' .format(' (matching specified versions)' if version_spec else '') ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_advisory_targets(name=None, advisory_ids=None, **kwargs): ''' Inspect the arguments to pkg.patch_installed and discover what advisory patches need to be installed. Return a dict of advisory patches to install. 
''' cur_patches = __salt__['pkg.list_installed_patches'](**kwargs) if advisory_ids: to_download = advisory_ids else: to_download = [name] if cur_patches.get(name, {}): # Advisory patch already installed, no need to install it again return {'name': name, 'changes': {}, 'result': True, 'comment': 'Advisory patch {0} is already ' 'installed'.format(name)} # Find out which advisory patches will be targeted in the call to pkg.install targets = [] for patch_name in to_download: cver = cur_patches.get(patch_name, {}) # Advisory patch not yet installed, so add to targets if not cver: targets.append(patch_name) continue if not targets: # All specified packages are already downloaded msg = ('All specified advisory patches are already installed') return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_remove_targets(name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.removed and discover what packages need to be removed. Return a dict of packages to remove. ''' if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if pkgs: to_remove = _repack_pkgs(pkgs, normalize=normalize) if not to_remove: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. 
See ' 'minion log.'} else: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_remove = {_normalize_name(name): version} version_spec = False # Find out which packages will be targeted in the call to pkg.remove # Check current versions against specified versions targets = [] problems = [] for pkgname, pkgver in six.iteritems(to_remove): # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names origin = bool(re.search('/', pkgname)) if __grains__['os'] == 'FreeBSD' and origin: cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == pkgname] else: cver = cur_pkgs.get(pkgname, []) # Package not installed, no need to remove if not cver: continue # No version specified and pkg is installed elif __salt__['pkg_resource.version_clean'](pkgver) is None: targets.append(pkgname) continue version_spec = True try: if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch): targets.append(pkgname) else: log.debug( 'Current version (%s) did not match desired version ' 'specification (%s), will not remove', cver, pkgver ) except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already absent msg = 'All specified packages{0} are already absent'.format( ' (matching specified versions)' if version_spec else '' ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_install_targets(name=None, version=None, pkgs=None, sources=None, skip_suggestions=False, pkg_verify=False, normalize=True, ignore_epoch=False, reinstall=False, refresh=False, **kwargs): ''' Inspect the arguments to pkg.installed and discover what packages need to be installed. 
Return a dict of desired packages ''' was_refreshed = False if all((pkgs, sources)): return {'name': name, 'changes': {}, 'result': False, 'comment': 'Only one of "pkgs" and "sources" is permitted.'} # dict for packages that fail pkg.verify and their altered files altered_files = {} # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True if salt.utils.platform.is_windows(): # Windows requires a refresh to establish a pkg db if refresh=True, so # add it to the kwargs. 
kwargs['refresh'] = refresh resolve_capabilities = kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__ try: cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) cur_prov = resolve_capabilities and __salt__['pkg.list_provides'](**kwargs) or dict() except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': exc.strerror} if salt.utils.platform.is_windows() and kwargs.pop('refresh', False): # We already refreshed when we called pkg.list_pkgs was_refreshed = True refresh = False if any((pkgs, sources)): if pkgs: desired = _repack_pkgs(pkgs, normalize=normalize) elif sources: desired = __salt__['pkg_resource.pack_sources']( sources, normalize=normalize, ) if not desired: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted \'{0}\' parameter. See ' 'minion log.'.format('pkgs' if pkgs else 'sources')} to_unpurge = _find_unpurge_targets(desired, **kwargs) else: if salt.utils.platform.is_windows(): pkginfo = _get_package_info(name, saltenv=kwargs['saltenv']) if not pkginfo: return {'name': name, 'changes': {}, 'result': False, 'comment': 'Package {0} not found in the ' 'repository.'.format(name)} if version is None: version = _get_latest_pkg_version(pkginfo) if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) desired = {_normalize_name(name): version} else: desired = {name: version} to_unpurge = _find_unpurge_targets(desired, **kwargs) # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names origin = bool(re.search('/', name)) if __grains__['os'] == 'FreeBSD' and origin: cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == name] else: cver = cur_pkgs.get(name, []) if name not in to_unpurge: if version and version in cver \ and not reinstall \ and not pkg_verify: # The package is installed and is the correct version return {'name': name, 'changes': {}, 'result': True, 
'comment': 'Version {0} of package \'{1}\' is already ' 'installed'.format(version, name)} # if cver is not an empty string, the package is already installed elif cver and version is None \ and not reinstall \ and not pkg_verify: # The package is installed return {'name': name, 'changes': {}, 'result': True, 'comment': 'Package {0} is already ' 'installed'.format(name)} version_spec = False if not sources: # Check for alternate package names if strict processing is not # enforced. Takes extra time. Disable for improved performance if not skip_suggestions: # Perform platform-specific pre-flight checks not_installed = dict([ (name, version) for name, version in desired.items() if not (name in cur_pkgs and (version is None or _fulfills_version_string(cur_pkgs[name], version))) ]) if not_installed: try: problems = _preflight_check(not_installed, **kwargs) except CommandExecutionError: pass else: comments = [] if problems.get('no_suggest'): comments.append( 'The following package(s) were not found, and no ' 'possible matches were found in the package db: ' '{0}'.format( ', '.join(sorted(problems['no_suggest'])) ) ) if problems.get('suggest'): for pkgname, suggestions in \ six.iteritems(problems['suggest']): comments.append( 'Package \'{0}\' not found (possible matches: ' '{1})'.format(pkgname, ', '.join(suggestions)) ) if comments: if len(comments) > 1: comments.append('') return {'name': name, 'changes': {}, 'result': False, 'comment': '. '.join(comments).rstrip()} # Resolve the latest package version for any packages with "latest" in the # package version wants_latest = [] \ if sources \ else [x for x, y in six.iteritems(desired) if y == 'latest'] if wants_latest: resolved_latest = __salt__['pkg.latest_version'](*wants_latest, refresh=refresh, **kwargs) if len(wants_latest) == 1: resolved_latest = {wants_latest[0]: resolved_latest} if refresh: was_refreshed = True refresh = False # pkg.latest_version returns an empty string when the package is # up-to-date. 
So check the currently-installed packages. If found, the # resolved latest version will be the currently installed one from # cur_pkgs. If not found, then the package doesn't exist and the # resolved latest version will be None. for key in resolved_latest: if not resolved_latest[key]: if key in cur_pkgs: resolved_latest[key] = cur_pkgs[key][-1] else: resolved_latest[key] = None # Update the desired versions with the ones we resolved desired.update(resolved_latest) # Find out which packages will be targeted in the call to pkg.install targets = {} to_reinstall = {} problems = [] warnings = [] failed_verify = False for package_name, version_string in six.iteritems(desired): cver = cur_pkgs.get(package_name, []) if resolve_capabilities and not cver and package_name in cur_prov: cver = cur_pkgs.get(cur_prov.get(package_name)[0], []) # Package not yet installed, so add to targets if not cver: targets[package_name] = version_string continue if sources: if reinstall: to_reinstall[package_name] = version_string continue elif 'lowpkg.bin_pkg_info' not in __salt__: continue # Metadata parser is available, cache the file and derive the # package's name and version err = 'Unable to cache {0}: {1}' try: cached_path = __salt__['cp.cache_file'](version_string, saltenv=kwargs['saltenv']) except CommandExecutionError as exc: problems.append(err.format(version_string, exc)) continue if not cached_path: problems.append(err.format(version_string, 'file not found')) continue elif not os.path.exists(cached_path): problems.append('{0} does not exist on minion'.format(version_string)) continue source_info = __salt__['lowpkg.bin_pkg_info'](cached_path) if source_info is None: warnings.append('Failed to parse metadata for {0}'.format(version_string)) continue else: verstr = source_info['version'] else: verstr = version_string if reinstall: to_reinstall[package_name] = version_string continue if not __salt__['pkg_resource.check_extra_requirements'](package_name, version_string): 
targets[package_name] = version_string continue # No version specified and pkg is installed elif __salt__['pkg_resource.version_clean'](version_string) is None: if (not reinstall) and pkg_verify: try: verify_result = __salt__['pkg.verify']( package_name, ignore_types=ignore_types, verify_options=verify_options, **kwargs ) except (CommandExecutionError, SaltInvocationError) as exc: failed_verify = exc.strerror continue if verify_result: to_reinstall[package_name] = version_string altered_files[package_name] = verify_result continue version_fulfilled = False allow_updates = bool(not sources and kwargs.get('allow_updates')) try: version_fulfilled = _fulfills_version_string(cver, verstr, ignore_epoch=ignore_epoch, allow_updates=allow_updates) except CommandExecutionError as exc: problems.append(exc.strerror) continue # Compare desired version against installed version. version_spec = True if not version_fulfilled: if reinstall: to_reinstall[package_name] = version_string else: version_conditions = _parse_version_string(version_string) if pkg_verify and any(oper == '==' for oper, version in version_conditions): try: verify_result = __salt__['pkg.verify']( package_name, ignore_types=ignore_types, verify_options=verify_options, **kwargs ) except (CommandExecutionError, SaltInvocationError) as exc: failed_verify = exc.strerror continue if verify_result: to_reinstall[package_name] = version_string altered_files[package_name] = verify_result else: log.debug( 'Current version (%s) did not match desired version ' 'specification (%s), adding to installation targets', cver, version_string ) targets[package_name] = version_string if failed_verify: problems.append(failed_verify) if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not any((targets, to_unpurge, to_reinstall)): # All specified packages are installed msg = 'All specified packages are already installed{0}' msg = msg.format( ' and are at the desired version' if 
            version_spec and not sources else ''
        )
        ret = {'name': name,
               'changes': {},
               'result': True,
               'comment': msg}
        if warnings:
            ret.setdefault('warnings', []).extend(warnings)
        return ret

    # Tail of _find_install_targets (the definition begins earlier in the
    # file): this 7-tuple is unpacked by installed() further below.
    return (desired, targets, to_unpurge, to_reinstall, altered_files,
            warnings, was_refreshed)


def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None):
    '''
    Determine whether or not the installed packages match what was requested in
    the SLS file.

    ``desired`` maps package name -> requested version string (or None),
    ``new_pkgs`` is the post-install output of ``pkg.list_pkgs``, and
    ``new_caps`` maps capability names to the package(s) providing them.

    Returns a pair of lists: (names verified OK, names that failed).
    '''
    ok = []
    failed = []
    if not new_caps:
        new_caps = dict()
    for pkgname, pkgver in desired.items():
        # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names.
        # Homebrew for Mac OSX does something similar with tap names
        # prefixing package names, separated with a slash.
        has_origin = '/' in pkgname
        if __grains__['os'] == 'FreeBSD' and has_origin:
            # Match on the package's origin rather than its bare name
            cver = [k for k, v in six.iteritems(new_pkgs)
                    if v['origin'] == pkgname]
        elif __grains__['os'] == 'MacOS' and has_origin:
            # Try the full tap-qualified name first, then the short name
            cver = new_pkgs.get(pkgname, new_pkgs.get(pkgname.split('/')[-1]))
        elif __grains__['os'] == 'OpenBSD':
            # Strip any '%'-suffix from the name before the lookup
            cver = new_pkgs.get(pkgname.split('%')[0])
        elif __grains__['os_family'] == 'Debian':
            # Strip any '=version' pin from the name before the lookup
            cver = new_pkgs.get(pkgname.split('=')[0])
        else:
            cver = new_pkgs.get(pkgname)
        if not cver and pkgname in new_caps:
            # Not installed under its own name; check the package that
            # provides this capability instead.
            cver = new_pkgs.get(new_caps.get(pkgname)[0])
        if not cver:
            failed.append(pkgname)
            continue
        elif pkgver == 'latest':
            # 'latest' is satisfied by any installed version
            ok.append(pkgname)
            continue
        elif not __salt__['pkg_resource.version_clean'](pkgver):
            # No concrete version was requested, so presence alone is enough
            ok.append(pkgname)
            continue
        elif pkgver.endswith("*") and cver[0].startswith(pkgver[:-1]):
            # Trailing-wildcard version spec: a prefix match is enough
            ok.append(pkgname)
            continue
        if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch):
            ok.append(pkgname)
        else:
            failed.append(pkgname)
    return ok, failed


def _get_desired_pkg(name, desired):
    '''
    Helper function that retrieves and nicely formats the desired pkg (and
    version if specified) so that helpful information can be printed in the
    comment for the state.
    '''
    # Only insert '=' when a plain version (no comparison operator) was given
    if not desired[name] or desired[name].startswith(('<', '>', '=')):
        oper = ''
    else:
        oper = '='
    return '{0}{1}{2}'.format(name, oper,
                              '' if not desired[name] else desired[name])


def _preflight_check(desired, fromrepo, **kwargs):
    '''
    Perform platform-specific checks on desired packages

    Returns a dict with two keys: ``suggest`` (mapping of package name ->
    suggested alternatives) and ``no_suggest`` (list of names not found in
    the package db with no suggestions).  Returns an empty dict when the pkg
    provider does not implement ``pkg.check_db``.
    '''
    if 'pkg.check_db' not in __salt__:
        return {}
    ret = {'suggest': {}, 'no_suggest': []}
    pkginfo = __salt__['pkg.check_db'](
        *list(desired.keys()), fromrepo=fromrepo, **kwargs
    )
    for pkgname in pkginfo:
        if pkginfo[pkgname]['found'] is False:
            if pkginfo[pkgname]['suggestions']:
                ret['suggest'][pkgname] = pkginfo[pkgname]['suggestions']
            else:
                ret['no_suggest'].append(pkgname)
    return ret


def _nested_output(obj):
    '''
    Serialize obj and format for output
    '''
    # The nested outputter reads its configuration from a module-level
    # __opts__ attribute, so inject this module's opts before rendering.
    nested.__opts__ = __opts__
    ret = nested.output(obj).rstrip()
    return ret


def _resolve_capabilities(pkgs, refresh=False, **kwargs):
    '''
    Resolve capabilities in ``pkgs`` and exchange them with real package
    names, when the result is distinct.
    This feature can be turned on while setting the parameter
    ``resolve_capabilities`` to True.

    Return the input dictionary with replaced capability names and as
    second return value a bool which says if a refresh needs to be run.

    In case of ``resolve_capabilities`` is False (disabled) or not
    supported by the implementation the input is returned unchanged.
    '''
    if not pkgs or 'pkg.resolve_capabilities' not in __salt__:
        return pkgs, refresh

    ret = __salt__['pkg.resolve_capabilities'](pkgs, refresh=refresh, **kwargs)
    # Any requested refresh was performed by pkg.resolve_capabilities above,
    # so tell the caller that no further refresh is needed.
    return ret, False


def installed(
        name,
        version=None,
        refresh=None,
        fromrepo=None,
        skip_verify=False,
        skip_suggestions=False,
        pkgs=None,
        sources=None,
        allow_updates=False,
        pkg_verify=False,
        normalize=True,
        ignore_epoch=False,
        reinstall=False,
        update_holds=False,
        bypass_file=None,
        bypass_file_contains=None,
        **kwargs):
    '''
    Ensure that the package is installed, and that it is the correct version
    (if specified).

    :param str name:
        The name of the package to be installed.
This parameter is ignored if either "pkgs" or "sources" is used. Additionally, please note that this option can only be used to install packages from a software repository. To install a package file manually, use the "sources" option detailed below. :param str version: Install a specific version of a package. This option is ignored if "sources" is used. Currently, this option is supported for the following pkg providers: :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`pkgin <salt.modules.pkgin>`, :mod:`win_pkg <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`. The version number includes the release designation where applicable, to allow Salt to target a specific release of a given version. When in doubt, using the ``pkg.latest_version`` function for an uninstalled package will tell you the version available. .. code-block:: bash # salt myminion pkg.latest_version vim-enhanced myminion: 2:7.4.160-1.el7 .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the ``pkg.latest_version`` output above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. 
Also, while this function is not yet implemented for all pkg frontends, :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` will show all versions available in the various repositories for a given package, irrespective of whether or not it is installed. .. code-block:: bash # salt myminion pkg.list_repo_pkgs bash myminion: ---------- bash: - 4.2.46-21.el7_3 - 4.2.46-20.el7_2 This function was first added for :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` in 2014.1.0, and was expanded to :py:func:`Debian/Ubuntu <salt.modules.aptpkg.list_repo_pkgs>` and :py:func:`Arch Linux <salt.modules.pacman.list_repo_pkgs>`-based distros in the 2017.7.0 release. The version strings returned by either of these functions can be used as version specifiers in pkg states. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.installed: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 If the version given is the string ``latest``, the latest available package version will be installed à la ``pkg.latest``. **WILDCARD VERSIONS** As of the 2017.7.0 release, this state now supports wildcards in package versions for SUSE SLES/Leap/Tumbleweed, Debian/Ubuntu, RHEL/CentOS, Arch Linux, and their derivatives. Using wildcards can be useful for packages where the release name is built into the version in some way, such as for RHEL/CentOS which typically has version numbers like ``1.2.34-5.el7``. An example of the usage for this would be: .. code-block:: yaml mypkg: pkg.installed: - version: '1.2.34*' Keep in mind that using wildcard versions will result in a slower state run since Salt must gather the available versions of the specified packages and figure out which of them match the specified wildcard expression. :param bool refresh: This parameter controls whether or not the package repo database is updated prior to installing the requested package(s). 
If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before installing. If ``False``, the package database will *not* be refreshed before installing. If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - cache_valid_time: 300 - allow_updates: True - hold: False In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param str fromrepo: Specify a repository from which to install .. note:: Distros which use APT (Debian, Ubuntu, etc.) do not have a concept of repositories, in the same way as YUM-based distros do. When a source is added, it is assigned to a given release. Consider the following source configuration: .. code-block:: text deb http://ppa.launchpad.net/saltstack/salt/ubuntu precise main The packages provided by this source would be made available via the ``precise`` release, therefore ``fromrepo`` would need to be set to ``precise`` for Salt to install the package from this source. 
Having multiple sources in the same release may result in the default install candidate being newer than what is desired. If this is the case, the desired version must be specified using the ``version`` parameter. If the ``pkgs`` parameter is being used to install multiple packages in the same state, then instead of using ``version``, use the method of version specification described in the **Multiple Package Installation Options** section below. Running the shell command ``apt-cache policy pkgname`` on a minion can help elucidate the APT configuration and aid in properly configuring states: .. code-block:: bash root@saltmaster:~# salt ubuntu01 cmd.run 'apt-cache policy ffmpeg' ubuntu01: ffmpeg: Installed: (none) Candidate: 7:0.10.11-1~precise1 Version table: 7:0.10.11-1~precise1 0 500 http://ppa.launchpad.net/jon-severinsson/ffmpeg/ubuntu/ precise/main amd64 Packages 4:0.8.10-0ubuntu0.12.04.1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise-updates/main amd64 Packages 500 http://security.ubuntu.com/ubuntu/ precise-security/main amd64 Packages 4:0.8.1-0ubuntu1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise/main amd64 Packages The release is located directly after the source's URL. The actual release name is the part before the slash, so to install version **4:0.8.10-0ubuntu0.12.04.1** either ``precise-updates`` or ``precise-security`` could be used for the ``fromrepo`` value. :param bool skip_verify: Skip the GPG verification check for the package to be installed :param bool skip_suggestions: Force strict package naming. Disables lookup of package alternatives. .. versionadded:: 2014.1.1 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 :param bool allow_updates: Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). 
This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - allow_updates: True - hold: False :param bool pkg_verify: .. versionadded:: 2014.7.0 For requested packages that are already installed and would not be targeted for upgrade or downgrade, use pkg.verify to determine if any of the files installed by the package have been altered. If files have been altered, the reinstall option of pkg.install is used to force a reinstall. Types to ignore can be passed to pkg.verify. Additionally, ``verify_options`` can be used to modify further the behavior of pkg.verify. See examples below. Currently, this option is supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>`. Examples: .. code-block:: yaml httpd: pkg.installed: - version: 2.2.15-30.el6.centos - pkg_verify: True .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc - verify_options: - nodeps - nofiledigest :param list ignore_types: List of types to ignore when verifying the package .. versionadded:: 2014.7.0 :param list verify_options: List of additional options to pass when verifying the package. These options will be added to the ``rpm -V`` command, prepended with ``--`` (for example, when ``nodeps`` is passed in this option, ``rpm -V`` will be run with ``--nodeps``). .. versionadded:: 2016.11.0 :param bool normalize: Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml gpfs.gplbin-2.6.32-279.31.1.el6.x86_64: pkg.installed: - normalize: False :param bool ignore_epoch: When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.installed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the package would be installed, but the state would report as failed because the actual installed version would be ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 | **MULTIPLE PACKAGE INSTALLATION OPTIONS: (not supported in pkgng)** :param list pkgs: A list of packages to install from a software repository. All packages listed under ``pkgs`` will be installed via a single command. .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar - baz - hold: True ``NOTE:`` For :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`winrepo <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`, version numbers can be specified in the ``pkgs`` argument. For example: .. 
code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz Additionally, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`zypper <salt.modules.zypper>`, :mod:`yum/dnf <salt.modules.yumpkg>`, and :mod:`apt <salt.modules.aptpkg>` support the ``<``, ``<=``, ``>=``, and ``>`` operators for more control over what versions will be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: '>=1.2.3-4' - baz ``NOTE:`` When using comparison operators, the expression must be enclosed in quotes to avoid a YAML render error. With :mod:`ebuild <salt.modules.ebuild>` is also possible to specify a use flag list and/or if the given packages should be in package.accept_keywords file and/or the overlay from which you want the package to be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo: '~' - bar: '~>=1.2:slot::overlay[use,-otheruse]' - baz :param list sources: A list of packages to install, along with the source URI or local path from which to install each package. In the example below, ``foo``, ``bar``, ``baz``, etc. refer to the name of the package, as it would appear in the output of the ``pkg.version`` or ``pkg.list_pkgs`` salt CLI commands. .. code-block:: yaml mypkgs: pkg.installed: - sources: - foo: salt://rpms/foo.rpm - bar: http://somesite.org/bar.rpm - baz: ftp://someothersite.org/baz.rpm - qux: /minion/path/to/qux.rpm **PLATFORM-SPECIFIC ARGUMENTS** These are specific to each OS. If it does not apply to the execution module for your OS, it is ignored. :param bool hold: Force the package to be held at the current installed version. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2014.7.0 :param bool update_holds: If ``True``, and this function would update the package version, any packages which are being held will be temporarily unheld so that they can be updated. 
Otherwise, if this function attempts to update a held package, the held package(s) will be skipped and the state will fail. By default, this parameter is set to ``False``. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2016.11.0 :param list names: A list of packages to install from a software repository. Each package will be installed individually by the package manager. .. warning:: Unlike ``pkgs``, the ``names`` parameter cannot specify a version. In addition, it makes a separate call to the package management frontend to install each package, whereas ``pkgs`` makes just a single call. It is therefore recommended to use ``pkgs`` instead of ``names`` to install multiple packages, both for the additional features and the performance improvement that it brings. :param bool install_recommends: Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - install_recommends: False :param bool only_upgrade: Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. :param bool report_reboot_exit_codes: If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. 
code-block:: yaml

            ms vcpp installed:
              pkg.installed:
                - name: ms-vcpp
                - version: 10.0.40219
                - report_reboot_exit_codes: False

    :param str bypass_file:
        If you wish to bypass the full package validation process, you can
        specify a file related to the installed package as a way to validate
        that the package has already been installed. A good example would be
        a config file that is deployed with the package. Another bypass_file
        could be ``/run/salt-minion.pid``.

        .. code-block:: yaml

            install_ntp:
              pkg.installed:
                - name: ntp
                - bypass_file: /etc/ntp.conf

        The use case for this feature is when running salt at significant
        scale. Each state that has a requisite for a ``pkg.installed`` will
        have salt querying the package manager of the system. Compared to
        simple diff checks, querying the package manager is a lengthy
        process. This feature is an attempt to reduce the run time of
        states. If only a config change is being made but you wish to keep
        all of the self-resolving requisites, this bypasses the lengthy cost
        of the package manager. The assumption is that if this file is
        present, the package should already be installed.

    :param str bypass_file_contains:
        This option can only be used in conjunction with the ``bypass_file``
        option. It is to provide a second layer of validation before
        bypassing the ``pkg.installed`` process.

        .. code-block:: yaml

            install_ntp:
              pkg.installed:
                - name: ntp
                - bypass_file: /etc/ntp.conf
                - bypass_file_contains: version-20181218

        This will have Salt check to see if the file contains the specified
        string. If the value is found, the ``pkg.installed`` process will be
        bypassed under the assumption that two pieces of validation have
        passed and the package is already installed.

        .. warning::
            Do not try and use ``{{ salt['pkg.version']('ntp') }}`` in a
            jinja template as part of your bypass_file_contains match. This
            will trigger a ``pkg.version`` lookup with the package manager
            and negate any time saved by trying to use the bypass feature.
:return: A dictionary containing the state of the software installation :rtype dict: .. note:: The ``pkg.installed`` state supports the usage of ``reload_modules``. This functionality allows you to force Salt to reload all modules. In many cases, Salt is clever enough to transparently reload the modules. For example, if you install a package, Salt reloads modules because some other module or state might require the package which was installed. However, there are some edge cases where this may not be the case, which is what ``reload_modules`` is meant to resolve. You should only use ``reload_modules`` if your ``pkg.installed`` does some sort of installation where if you do not reload the modules future items in your state which rely on the software being installed will fail. Please see the :ref:`Reloading Modules <reloading-modules>` documentation for more information. ''' if not pkgs and isinstance(pkgs, list): return {'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided'} # If just a name (and optionally a version) is passed, just pack them into # the pkgs argument. 
if name and not any((pkgs, sources)): if version: pkgs = [{name: version}] version = None else: pkgs = [name] kwargs['saltenv'] = __env__ refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if bypass_file is not None and bypass_file_contains is not None: if os.path.isfile(bypass_file): with salt.utils.fopen(bypass_file) as bypass_file_open: if bypass_file_contains in bypass_file_open.read(): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as {} was present in {}'.format(bypass_file_contains, bypass_file)} if bypass_file is not None and bypass_file_contains is None: if os.path.isfile(bypass_file): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as bypass_file {} was present'.format(bypass_file)} # check if capabilities should be checked and modify the requested packages # accordingly. if pkgs: pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) if not isinstance(pkg_verify, list): pkg_verify = pkg_verify is True if (pkg_verify or isinstance(pkg_verify, list)) \ and 'pkg.verify' not in __salt__: return {'name': name, 'changes': {}, 'result': False, 'comment': 'pkg.verify not implemented'} if not isinstance(version, six.string_types) and version is not None: version = six.text_type(version) kwargs['allow_updates'] = allow_updates result = _find_install_targets(name, version, pkgs, sources, fromrepo=fromrepo, skip_suggestions=skip_suggestions, pkg_verify=pkg_verify, normalize=normalize, ignore_epoch=ignore_epoch, reinstall=reinstall, refresh=refresh, **kwargs) try: (desired, targets, to_unpurge, to_reinstall, altered_files, warnings, was_refreshed) = result if was_refreshed: refresh = False except ValueError: # _find_install_targets() found no targets or encountered an error # check that the hold function is available if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( 
name=name, pkgs=pkgs, sources=sources ) except (CommandExecutionError, SaltInvocationError) as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': six.text_type(exc)} if 'result' in hold_ret and not hold_ret['result']: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] for i in modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] result['changes'][i['name']] = i['changes'] for i in not_modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] for i in failed_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] return result if to_unpurge and 'lowpkg.unpurge' not in __salt__: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'lowpkg.unpurge not implemented'} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret # Remove any targets not returned by _find_install_targets if pkgs: pkgs = [dict([(x, y)]) for x, y in six.iteritems(targets)] pkgs.extend([dict([(x, y)]) for x, y in six.iteritems(to_reinstall)]) elif sources: oldsources = sources sources = [x for x in oldsources if next(iter(list(x.keys()))) in targets] sources.extend([x for x in oldsources if next(iter(list(x.keys()))) in to_reinstall]) comment = [] if __opts__['test']: if targets: if sources: summary = ', '.join(targets) else: summary = ', '.join([_get_desired_pkg(x, targets) for x in targets]) comment.append('The following packages would be ' 'installed/updated: {0}'.format(summary)) if to_unpurge: comment.append( 'The following packages would have their selection 
status ' 'changed from \'purge\' to \'install\': {0}' .format(', '.join(to_unpurge)) ) if to_reinstall: # Add a comment for each package in to_reinstall with its # pkg.verify output if reinstall: reinstall_targets = [] for reinstall_pkg in to_reinstall: if sources: reinstall_targets.append(reinstall_pkg) else: reinstall_targets.append( _get_desired_pkg(reinstall_pkg, to_reinstall) ) msg = 'The following packages would be reinstalled: ' msg += ', '.join(reinstall_targets) comment.append(msg) else: for reinstall_pkg in to_reinstall: if sources: pkgstr = reinstall_pkg else: pkgstr = _get_desired_pkg(reinstall_pkg, to_reinstall) comment.append( 'Package \'{0}\' would be reinstalled because the ' 'following files have been altered:'.format(pkgstr) ) comment.append( _nested_output(altered_files[reinstall_pkg]) ) ret = {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret changes = {'installed': {}} modified_hold = None not_modified_hold = None failed_hold = None if targets or to_reinstall: try: pkg_ret = __salt__['pkg.install'](name=None, refresh=refresh, version=version, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=pkgs, sources=sources, reinstall=bool(to_reinstall), normalize=normalize, update_holds=update_holds, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while installing ' 'package(s): {0}'.format(exc)) if warnings: ret.setdefault('warnings', []).extend(warnings) return ret if refresh: refresh = False if isinstance(pkg_ret, dict): changes['installed'].update(pkg_ret) elif isinstance(pkg_ret, six.string_types): comment.append(pkg_ret) # Code below will be looking for a dictionary. If this is a string # it means that there was an exception raised and that no packages # changed, so now that we have added this error to the comments we # set this to an empty dictionary so that the code below which # checks reinstall targets works. pkg_ret = {} if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( name=name, pkgs=desired ) except (CommandExecutionError, SaltInvocationError) as exc: comment.append(six.text_type(exc)) ret = {'name': name, 'changes': changes, 'result': False, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: if 'result' in hold_ret and not hold_ret['result']: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] if to_unpurge: changes['purge_desired'] = __salt__['lowpkg.unpurge'](*to_unpurge) # Analyze pkg.install results for packages in targets if sources: modified = [x for x in changes['installed'] if x in targets] not_modified = [x for x in desired if x not in targets 
and x not in to_reinstall] failed = [x for x in targets if x not in modified] else: if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True new_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__: new_caps = __salt__['pkg.list_provides'](**kwargs) else: new_caps = {} ok, failed = _verify_install(desired, new_pkgs, ignore_epoch=ignore_epoch, new_caps=new_caps) modified = [x for x in ok if x in targets] not_modified = [x for x in ok if x not in targets and x not in to_reinstall] failed = [x for x in failed if x in targets] # If there was nothing unpurged, just set the changes dict to the contents # of changes['installed']. if not changes.get('purge_desired'): changes = changes['installed'] if modified: if sources: summary = ', '.join(modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in modified]) if len(summary) < 20: comment.append('The following packages were installed/updated: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} installed/updated.'.format( len(modified), 's' if len(modified) > 1 else '', 'were' if len(modified) > 1 else 'was' ) ) if modified_hold: for i in modified_hold: change_name = i['name'] if change_name in changes: comment.append(i['comment']) if changes[change_name]['new']: changes[change_name]['new'] += '\n' changes[change_name]['new'] += '{0}'.format(i['changes']['new']) if changes[change_name]['old']: changes[change_name]['old'] += '\n' changes[change_name]['old'] += '{0}'.format(i['changes']['old']) else: comment.append(i['comment']) changes[change_name] = {} changes[change_name]['new'] = '{0}'.format(i['changes']['new']) # Any requested packages that were not targeted for install or reinstall if not_modified: if sources: summary = ', '.join(not_modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in not_modified]) if len(not_modified) <= 20: comment.append('The following 
packages were already installed: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} already installed'.format( len(not_modified), 's' if len(not_modified) > 1 else '', 'were' if len(not_modified) > 1 else 'was' ) ) if not_modified_hold: for i in not_modified_hold: comment.append(i['comment']) result = True if failed: if sources: summary = ', '.join(failed) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in failed]) comment.insert(0, 'The following packages failed to ' 'install/update: {0}'.format(summary)) result = False if failed_hold: for i in failed_hold: comment.append(i['comment']) result = False # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] # Rerun pkg.verify for packages in to_reinstall to determine failed modified = [] failed = [] for reinstall_pkg in to_reinstall: if reinstall: if reinstall_pkg in pkg_ret: modified.append(reinstall_pkg) else: failed.append(reinstall_pkg) elif pkg_verify: # No need to wrap this in a try/except because we would already # have caught invalid arguments earlier. 
            verify_result = __salt__['pkg.verify'](reinstall_pkg,
                                                   ignore_types=ignore_types,
                                                   verify_options=verify_options,
                                                   **kwargs)
            # A non-empty verify result means some files still differ from the
            # package metadata, so the reinstall did not remediate them.
            if verify_result:
                failed.append(reinstall_pkg)
                altered_files[reinstall_pkg] = verify_result
            else:
                modified.append(reinstall_pkg)

    if modified:
        # Add a comment for each package in modified with its pkg.verify output
        for modified_pkg in modified:
            if sources:
                pkgstr = modified_pkg
            else:
                pkgstr = _get_desired_pkg(modified_pkg, desired)
            msg = 'Package {0} was reinstalled.'.format(pkgstr)
            if modified_pkg in altered_files:
                msg += ' The following files were remediated:'
                comment.append(msg)
                comment.append(_nested_output(altered_files[modified_pkg]))
            else:
                comment.append(msg)

    if failed:
        # Add a comment for each package in failed with its pkg.verify output
        for failed_pkg in failed:
            if sources:
                pkgstr = failed_pkg
            else:
                pkgstr = _get_desired_pkg(failed_pkg, desired)
            msg = ('Reinstall was not successful for package {0}.'
                   .format(pkgstr))
            if failed_pkg in altered_files:
                msg += ' The following files could not be remediated:'
                comment.append(msg)
                comment.append(_nested_output(altered_files[failed_pkg]))
            else:
                comment.append(msg)
        result = False

    ret = {'name': name,
           'changes': changes,
           'result': result,
           'comment': '\n'.join(comment)}
    if warnings:
        ret.setdefault('warnings', []).extend(warnings)
    return ret


def downloaded(name,
               version=None,
               pkgs=None,
               fromrepo=None,
               ignore_epoch=None,
               **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that the package is downloaded, and that it is the correct version
    (if specified).

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    :param str name:
        The name of the package to be downloaded. This parameter is ignored if
        either "pkgs" is used. Additionally, please note that this option can
        only be used to download packages from a software repository.

    :param str version:
        Download a specific version of a package.

        .. important::
            As of version 2015.8.7, for distros which use yum/dnf, packages
            which have a version with a nonzero epoch (that is, versions which
            start with a number followed by a colon must have the epoch included
            when specifying the version number. For example:

            .. code-block:: yaml

                vim-enhanced:
                  pkg.downloaded:
                    - version: 2:7.4.160-1.el7

            An **ignore_epoch** argument has been added to which causes the
            epoch to be disregarded when the state checks to see if the desired
            version was installed.

            You can install a specific version when using the ``pkgs`` argument
            by including the version after the package:

            .. code-block:: yaml

                common_packages:
                  pkg.downloaded:
                    - pkgs:
                      - unzip
                      - dos2unix
                      - salt-minion: 2015.8.5-1.el6

    :param bool resolve_capabilities:
        Turn on resolving capabilities. This allow one to name "provides" or
        alias names for packages.

        .. versionadded:: 2018.3.0

    CLI Example:

    .. code-block:: yaml

        zsh:
          pkg.downloaded:
            - version: 5.0.5-4.63
            - fromrepo: "myrepository"
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    if 'pkg.list_downloaded' not in __salt__:
        ret['result'] = False
        ret['comment'] = 'The pkg.downloaded state is not available on ' \
                         'this platform'
        return ret

    # An explicitly-passed empty pkgs list means there is nothing to do.
    if not pkgs and isinstance(pkgs, list):
        ret['result'] = True
        ret['comment'] = 'No packages to download provided'
        return ret

    # If just a name (and optionally a version) is passed, just pack them into
    # the pkgs argument.
    if name and not pkgs:
        if version:
            pkgs = [{name: version}]
            version = None
        else:
            pkgs = [name]

    # It doesn't make sense here to receive 'downloadonly' as kwargs
    # as we're explicitly passing 'downloadonly=True' to execution module.
    if 'downloadonly' in kwargs:
        del kwargs['downloadonly']

    pkgs, _refresh = _resolve_capabilities(pkgs, **kwargs)

    # Only downloading not yet downloaded packages
    targets = _find_download_targets(name,
                                     version,
                                     pkgs,
                                     fromrepo=fromrepo,
                                     ignore_epoch=ignore_epoch,
                                     **kwargs)
    # A dict containing a 'result' key is a complete state return produced by
    # the helper (e.g. nothing to do, or an error); pass it through unchanged.
    if isinstance(targets, dict) and 'result' in targets:
        return targets
    elif not isinstance(targets, dict):
        ret['result'] = False
        ret['comment'] = 'An error was encountered while checking targets: ' \
                         '{0}'.format(targets)
        return ret

    if __opts__['test']:
        summary = ', '.join(targets)
        ret['comment'] = 'The following packages would be ' \
                         'downloaded: {0}'.format(summary)
        return ret

    try:
        pkg_ret = __salt__['pkg.install'](name=name,
                                          pkgs=pkgs,
                                          version=version,
                                          downloadonly=True,
                                          fromrepo=fromrepo,
                                          ignore_epoch=ignore_epoch,
                                          **kwargs)
        ret['result'] = True
        ret['changes'].update(pkg_ret)
    except CommandExecutionError as exc:
        ret = {'name': name, 'result': False}
        if exc.info:
            # Get information for state return from the exception.
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = 'An error was encountered while downloading ' \
                             'package(s): {0}'.format(exc)
        return ret

    new_pkgs = __salt__['pkg.list_downloaded'](**kwargs)
    ok, failed = _verify_install(targets, new_pkgs, ignore_epoch=ignore_epoch)

    if failed:
        summary = ', '.join([_get_desired_pkg(x, targets)
                             for x in failed])
        ret['result'] = False
        ret['comment'] = 'The following packages failed to ' \
                         'download: {0}'.format(summary)

    if not ret['changes'] and not ret['comment']:
        ret['result'] = True
        ret['comment'] = 'Packages are already downloaded: ' \
                         '{0}'.format(', '.join(targets))

    return ret


def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that packages related to certain advisory ids are installed.

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    CLI Example:

    .. code-block:: yaml

        issue-foo-fixed:
          pkg.patch_installed:
            - advisory_ids:
              - SUSE-SLE-SERVER-12-SP2-2017-185
              - SUSE-SLE-SERVER-12-SP2-2017-150
              - SUSE-SLE-SERVER-12-SP2-2017-120
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    if 'pkg.list_patches' not in __salt__:
        ret['result'] = False
        ret['comment'] = 'The pkg.patch_installed state is not available on ' \
                         'this platform'
        return ret

    # An explicitly-passed empty advisory_ids list means there is nothing to do.
    if not advisory_ids and isinstance(advisory_ids, list):
        ret['result'] = True
        ret['comment'] = 'No advisory ids provided'
        return ret

    # Only downloading not yet downloaded packages
    targets = _find_advisory_targets(name, advisory_ids, **kwargs)
    # A dict containing a 'result' key is a complete state return produced by
    # the helper; pass it through unchanged.
    if isinstance(targets, dict) and 'result' in targets:
        return targets
    elif not isinstance(targets, list):
        ret['result'] = False
        ret['comment'] = 'An error was encountered while checking targets: ' \
                         '{0}'.format(targets)
        return ret

    if __opts__['test']:
        summary = ', '.join(targets)
        ret['comment'] = 'The following advisory patches would be ' \
                         'downloaded: {0}'.format(summary)
        return ret

    try:
        pkg_ret = __salt__['pkg.install'](name=name,
                                          advisory_ids=advisory_ids,
                                          downloadonly=downloadonly,
                                          **kwargs)
        ret['result'] = True
        ret['changes'].update(pkg_ret)
    except CommandExecutionError as exc:
        ret = {'name': name, 'result': False}
        if exc.info:
            # Get information for state return from the exception.
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = ('An error was encountered while downloading '
                              'package(s): {0}'.format(exc))
        return ret

    if not ret['changes'] and not ret['comment']:
        # pkg.install reported nothing to change, so the advisories were
        # already applied (or downloaded, in downloadonly mode).
        status = 'downloaded' if downloadonly else 'installed'
        ret['result'] = True
        ret['comment'] = ('Advisory patch is not needed or related packages '
                          'are already {0}'.format(status))

    return ret


def patch_downloaded(name, advisory_ids=None, **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that packages related to certain advisory ids are downloaded.

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    CLI Example:

    .. code-block:: yaml

        preparing-to-fix-issues:
          pkg.patch_downloaded:
            - advisory_ids:
              - SUSE-SLE-SERVER-12-SP2-2017-185
              - SUSE-SLE-SERVER-12-SP2-2017-150
              - SUSE-SLE-SERVER-12-SP2-2017-120
    '''
    if 'pkg.list_patches' not in __salt__:
        return {'name': name,
                'result': False,
                'changes': {},
                'comment': 'The pkg.patch_downloaded state is not available on '
                           'this platform'}

    # It doesn't make sense here to receive 'downloadonly' as kwargs
    # as we're explicitly passing 'downloadonly=True' to execution module.
    if 'downloadonly' in kwargs:
        del kwargs['downloadonly']
    # Delegate to patch_installed with downloadonly forced on.
    return patch_installed(name=name, advisory_ids=advisory_ids, downloadonly=True, **kwargs)


def latest(
        name,
        refresh=None,
        fromrepo=None,
        skip_verify=False,
        pkgs=None,
        watch_flags=True,
        **kwargs):
    '''
    Ensure that the named package is installed and the latest available
    package. If the package can be updated, this state function will update
    the package. Generally it is better for the
    :mod:`installed <salt.states.pkg.installed>` function to be used, as
    :mod:`latest <salt.states.pkg.latest>` will update the package whenever a
    new package is available.

    name
        The name of the package to maintain at the latest available version.
        This parameter is ignored if "pkgs" is used.

    fromrepo
        Specify a repository from which to install

    skip_verify
        Skip the GPG verification check for the package to be installed

    refresh
        This parameter controls whether or not the package repo database is
        updated prior to checking for the latest available version of the
        requested packages.

        If ``True``, the package database will be refreshed (``apt-get
        update`` or equivalent, depending on platform) before checking for
        the latest available version of the requested packages.

        If ``False``, the package database will *not* be refreshed before
        checking.

        If unset, then Salt treats package database refreshes differently
        depending on whether or not a ``pkg`` state has been executed already
        during the current Salt run. Once a refresh has been performed in a
        ``pkg`` state, for the remainder of that Salt run no other refreshes
        will be performed for ``pkg`` states which do not explicitly set
        ``refresh`` to ``True``. This prevents needless additional refreshes
        from slowing down the Salt run.

    :param str cache_valid_time:

        .. versionadded:: 2016.11.0

        This parameter sets the value in seconds after which the cache is
        marked as invalid, and a cache update is necessary. This overwrites
        the ``refresh`` parameter's default behavior.

        Example:

        .. code-block:: yaml

            httpd:
              pkg.latest:
                - refresh: True
                - cache_valid_time: 300

        In this case, a refresh will not take place for 5 minutes since the
        last ``apt-get update`` was executed on the system.

        .. note::

            This parameter is available only on Debian based distributions
            and has no effect on the rest.

    :param bool resolve_capabilities:
        Turn on resolving capabilities. This allow one to name "provides" or
        alias names for packages.

        .. versionadded:: 2018.3.0

    Multiple Package Installation Options:

    (Not yet supported for: FreeBSD, OpenBSD, MacOS, and Solaris pkgutil)

    pkgs
        A list of packages to maintain at the latest available version.

        .. code-block:: yaml

            mypkgs:
              pkg.latest:
                - pkgs:
                  - foo
                  - bar
                  - baz

    install_recommends
        Whether to install the packages marked as recommended. Default is
        ``True``. Currently only works with APT-based systems.

        .. versionadded:: 2015.5.0

        .. code-block:: yaml

            httpd:
              pkg.latest:
                - install_recommends: False

    only_upgrade
        Only upgrade the packages, if they are already installed. Default is
        ``False``. Currently only works with APT-based systems.

        .. versionadded:: 2015.5.0

        .. code-block:: yaml

            httpd:
              pkg.latest:
                - only_upgrade: True

        .. note::
            If this parameter is set to True and the package is not already
            installed, the state will fail.

    report_reboot_exit_codes
        If the installer exits with a recognized exit code indicating that a
        reboot is required, the module function
        *win_system.set_reboot_required_witnessed* will be called, preserving
        the knowledge of this event for the remainder of the current boot
        session. For the time being, ``3010`` is the only recognized exit
        code, but this is subject to future refinement. The value of this
        param defaults to ``True``. This parameter has no effect on
        non-Windows systems.

        .. versionadded:: 2016.11.0

        .. code-block:: yaml

            ms vcpp installed:
              pkg.latest:
                - name: ms-vcpp
                - report_reboot_exit_codes: False
    '''
    refresh = salt.utils.pkg.check_refresh(__opts__, refresh)

    if kwargs.get('sources'):
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'The "sources" parameter is not supported.'}
    elif pkgs:
        desired_pkgs = list(_repack_pkgs(pkgs).keys())
        if not desired_pkgs:
            # Badly-formatted SLS
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'Invalidly formatted "pkgs" parameter. See '
                               'minion log.'}
    else:
        if not pkgs and isinstance(pkgs, list):
            return {
                'name': name,
                'changes': {},
                'result': True,
                'comment': 'No packages to install provided'
            }
        else:
            desired_pkgs = [name]

    kwargs['saltenv'] = __env__

    # check if capabilities should be checked and modify the requested packages
    # accordingly.
    desired_pkgs, refresh = _resolve_capabilities(desired_pkgs,
                                                  refresh=refresh,
                                                  **kwargs)

    try:
        avail = __salt__['pkg.latest_version'](*desired_pkgs,
                                               fromrepo=fromrepo,
                                               refresh=refresh,
                                               **kwargs)
    except CommandExecutionError as exc:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'An error was encountered while checking the '
                           'newest available version of package(s): {0}'
                           .format(exc)}

    try:
        cur = __salt__['pkg.version'](*desired_pkgs, **kwargs)
    except CommandExecutionError as exc:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': exc.strerror}

    # Repack the cur/avail data if only a single package is being checked
    if isinstance(cur, six.string_types):
        cur = {desired_pkgs[0]: cur}
    if isinstance(avail, six.string_types):
        avail = {desired_pkgs[0]: avail}

    targets = {}
    problems = []
    for pkg in desired_pkgs:
        if not avail.get(pkg):
            # Package either a) is up-to-date, or b) does not exist
            if not cur.get(pkg):
                # Package does not exist
                msg = 'No information found for \'{0}\'.'.format(pkg)
                log.error(msg)
                problems.append(msg)
            elif watch_flags \
                    and __grains__.get('os_family') == 'Gentoo' \
                    and __salt__['portage_config.is_changed_uses'](pkg):
                # Package is up-to-date, but Gentoo USE flags are changing so
                # we need to add it to the targets
                targets[pkg] = cur[pkg]
        else:
            # Package either a) is not installed, or b) is installed and has an
            # upgrade available
            targets[pkg] = avail[pkg]

    if problems:
        return {
            'name': name,
            'changes': {},
            'result': False,
            'comment': ' '.join(problems)
        }

    if targets:
        # Find up-to-date packages
        if not pkgs:
            # There couldn't have been any up-to-date packages if this state
            # only targeted a single package and is being allowed to proceed to
            # the install step.
            up_to_date = []
        else:
            up_to_date = [x for x in pkgs if x not in targets]

        if __opts__['test']:
            comments = []
            comments.append(
                'The following packages would be installed/upgraded: ' +
                ', '.join(sorted(targets))
            )
            if up_to_date:
                up_to_date_count = len(up_to_date)
                if up_to_date_count <= 10:
                    comments.append(
                        'The following packages are already up-to-date: ' +
                        ', '.join(
                            ['{0} ({1})'.format(x, cur[x])
                             for x in sorted(up_to_date)]
                        )
                    )
                else:
                    comments.append(
                        '{0} packages are already up-to-date'
                        .format(up_to_date_count)
                    )

            return {'name': name,
                    'changes': {},
                    'result': None,
                    'comment': '\n'.join(comments)}

        if salt.utils.platform.is_windows():
            # pkg.install execution module on windows ensures the software
            # package is installed when no version is specified, it does not
            # upgrade the software to the latest. This is per the design.
            # Build updated list of pkgs *with version number*, exclude
            # non-targeted ones
            targeted_pkgs = [{x: targets[x]} for x in targets]
        else:
            # Build updated list of pkgs to exclude non-targeted ones
            targeted_pkgs = list(targets)

        # No need to refresh, if a refresh was necessary it would have been
        # performed above when pkg.latest_version was run.
        try:
            changes = __salt__['pkg.install'](name=None,
                                              refresh=False,
                                              fromrepo=fromrepo,
                                              skip_verify=skip_verify,
                                              pkgs=targeted_pkgs,
                                              **kwargs)
        except CommandExecutionError as exc:
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'An error was encountered while installing '
                               'package(s): {0}'.format(exc)}

        if changes:
            # Find failed and successful updates.
            # NOTE: precedence is 'not A or (B and C)' -- a package fails if it
            # has no change entry at all, or if its new version differs from
            # the target while the target was a concrete version (a target of
            # 'latest' is accepted as long as the package changed).
            failed = [x for x in targets
                      if not changes.get(x) or
                      changes[x].get('new') != targets[x] and
                      targets[x] != 'latest']
            successful = [x for x in targets if x not in failed]

            comments = []
            if failed:
                msg = 'The following packages failed to update: ' \
                      '{0}'.format(', '.join(sorted(failed)))
                comments.append(msg)
            if successful:
                msg = 'The following packages were successfully ' \
                      'installed/upgraded: ' \
                      '{0}'.format(', '.join(sorted(successful)))
                comments.append(msg)
            if up_to_date:
                if len(up_to_date) <= 10:
                    msg = 'The following packages were already up-to-date: ' \
                          '{0}'.format(', '.join(sorted(up_to_date)))
                else:
                    msg = '{0} packages were already up-to-date '.format(
                        len(up_to_date))
                comments.append(msg)

            return {'name': name,
                    'changes': changes,
                    'result': False if failed else True,
                    'comment': ' '.join(comments)}
        else:
            if len(targets) > 10:
                comment = ('{0} targeted packages failed to update. '
                           'See debug log for details.'.format(len(targets)))
            elif len(targets) > 1:
                comment = ('The following targeted packages failed to update. '
                           'See debug log for details: ({0}).'
                           .format(', '.join(sorted(targets))))
            else:
                comment = 'Package {0} failed to ' \
                          'update.'.format(next(iter(list(targets.keys()))))
            if up_to_date:
                if len(up_to_date) <= 10:
                    comment += ' The following packages were already ' \
                               'up-to-date: ' \
                               '{0}'.format(', '.join(sorted(up_to_date)))
                else:
                    comment += '{0} packages were already ' \
                               'up-to-date'.format(len(up_to_date))

            return {'name': name,
                    'changes': changes,
                    'result': False,
                    'comment': comment}
    else:
        if len(desired_pkgs) > 10:
            comment = 'All {0} packages are up-to-date.'.format(
                len(desired_pkgs))
        elif len(desired_pkgs) > 1:
            comment = 'All packages are up-to-date ' \
                      '({0}).'.format(', '.join(sorted(desired_pkgs)))
        else:
            comment = 'Package {0} is already ' \
                      'up-to-date'.format(desired_pkgs[0])

        return {'name': name,
                'changes': {},
                'result': True,
                'comment': comment}


def _uninstall(
        action='remove',
        name=None,
        version=None,
        pkgs=None,
        normalize=True,
        ignore_epoch=False,
        **kwargs):
    '''
    Common function for package removal
    '''
    if action not in ('remove', 'purge'):
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'Invalid action \'{0}\'. '
                           'This is probably a bug.'.format(action)}

    try:
        pkg_params = __salt__['pkg_resource.parse_targets'](
            name, pkgs, normalize=normalize)[0]
    except MinionError as exc:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'An error was encountered while parsing targets: '
                           '{0}'.format(exc)}
    targets = _find_remove_targets(name, version, pkgs, normalize,
                                   ignore_epoch=ignore_epoch, **kwargs)
    # A dict containing a 'result' key is a complete state return produced by
    # the helper; pass it through unchanged.
    if isinstance(targets, dict) and 'result' in targets:
        return targets
    elif not isinstance(targets, list):
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'An error was encountered while checking targets: '
                           '{0}'.format(targets)}
    if action == 'purge':
        # Also purge packages that were removed earlier but still have
        # leftover state (listed with removed=True).
        old_removed = __salt__['pkg.list_pkgs'](versions_as_list=True,
                                                removed=True,
                                                **kwargs)
        targets.extend([x for x in pkg_params if x in old_removed])
    targets.sort()

    if not targets:
        return {'name': name,
                'changes': {},
                'result': True,
                'comment': 'None of the targeted packages are installed'
                           '{0}'.format(' or partially installed'
                                        if action == 'purge' else '')}

    if __opts__['test']:
        return {'name': name,
                'changes': {},
                'result': None,
                'comment': 'The following packages will be {0}d: '
                           '{1}.'.format(action, ', '.join(targets))}

    changes = __salt__['pkg.{0}'.format(action)](name, pkgs=pkgs, version=version, **kwargs)
    new = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
    failed = []
    for x in pkg_params:
        if __grains__['os_family'] in ['Suse', 'RedHat']:
            # Check if the package version set to be removed is actually removed:
            if x in new and not pkg_params[x]:
                # No specific version was requested, but the package is still
                # installed at some version.
                failed.append(x)
            elif x in new and pkg_params[x] in new[x]:
                # The specific version requested for removal is still among
                # the installed versions.
                failed.append(x + "-" + pkg_params[x])
        elif x in new:
            failed.append(x)
    if action == 'purge':
        new_removed = __salt__['pkg.list_pkgs'](versions_as_list=True,
                                                removed=True,
                                                **kwargs)
        failed.extend([x for x in pkg_params if x in new_removed])
    failed.sort()

    if failed:
        return {'name': name,
                'changes': changes,
                'result': False,
                'comment': 'The following packages failed to {0}: '
                           '{1}.'.format(action, ', '.join(failed))}

    comments = []
    not_installed = sorted([x for x in pkg_params if x not in targets])
    if not_installed:
        comments.append('The following packages were not installed: '
                        '{0}'.format(', '.join(not_installed)))
        comments.append('The following packages were {0}d: '
                        '{1}.'.format(action, ', '.join(targets)))
    else:
        comments.append('All targeted packages were {0}d.'.format(action))

    return {'name': name,
            'changes': changes,
            'result': True,
            'comment': ' '.join(comments)}


def removed(name,
            version=None,
            pkgs=None,
            normalize=True,
            ignore_epoch=False,
            **kwargs):
    '''
    Verify that a package is not installed, calling ``pkg.remove`` if necessary
    to remove the package.

    name
        The name of the package to be removed.

    version
        The version of the package that should be removed. Don't do anything if
        the package is installed with an unmatching version.

        .. important::
            As of version 2015.8.7, for distros which use yum/dnf, packages
            which have a version with a nonzero epoch (that is, versions which
            start with a number followed by a colon like in the example above)
            must have the epoch included when specifying the version number.
            For example:

            .. code-block:: yaml

                vim-enhanced:
                  pkg.removed:
                    - version: 2:7.4.160-1.el7

            In version 2015.8.9, an **ignore_epoch** argument has been added to
            :py:mod:`pkg.installed <salt.states.pkg.installed>`,
            :py:mod:`pkg.removed <salt.states.pkg.removed>`, and
            :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which
            causes the epoch to be disregarded when the state checks to see if
            the desired version was installed. If **ignore_epoch** was not set
            to ``True``, and instead of ``2:7.4.160-1.el7`` a version of
            ``7.4.160-1.el7`` were used, this state would report success since
            the actual installed version includes the epoch, and the specified
            version would not match.

    normalize : True
        Normalize the package name by removing the architecture, if the
        architecture of the package is different from the architecture of the
        operating system. The ability to disable this behavior is useful for
        poorly-created packages which include the architecture as an actual
        part of the name, such as kernel modules which match a specific kernel
        version.

        .. versionadded:: 2015.8.0

    ignore_epoch : False
        When a package version contains an non-zero epoch (e.g.
        ``1:3.14.159-2.el7``, and a specific version of a package is desired,
        set this option to ``True`` to ignore the epoch when comparing
        versions. This allows for the following SLS to be used:

        .. code-block:: yaml

            # Actual vim-enhanced version: 2:7.4.160-1.el7
            vim-enhanced:
              pkg.removed:
                - version: 7.4.160-1.el7
                - ignore_epoch: True

        Without this option set to ``True`` in the above example, the state
        would falsely report success since the actual installed version is
        ``2:7.4.160-1.el7``. Alternatively, this option can be left as
        ``False`` and the full version string (with epoch) can be specified in
        the SLS file:

        .. code-block:: yaml

            vim-enhanced:
              pkg.removed:
                - version: 2:7.4.160-1.el7

        .. versionadded:: 2015.8.9

    Multiple Package Options:

    pkgs
        A list of packages to remove. Must be passed as a python list. The
        ``name`` parameter will be ignored if this option is passed. It accepts
        version numbers as well.

        .. versionadded:: 0.16.0
    '''
    kwargs['saltenv'] = __env__
    try:
        return _uninstall(action='remove', name=name, version=version,
                          pkgs=pkgs, normalize=normalize,
                          ignore_epoch=ignore_epoch, **kwargs)
    except CommandExecutionError as exc:
        ret = {'name': name, 'result': False}
        if exc.info:
            # Get information for state return from the exception.
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = ('An error was encountered while removing '
                              'package(s): {0}'.format(exc))
        return ret


def purged(name,
           version=None,
           pkgs=None,
           normalize=True,
           ignore_epoch=False,
           **kwargs):
    '''
    Verify that a package is not installed, calling ``pkg.purge`` if necessary
    to purge the package. All configuration files are also removed.

    name
        The name of the package to be purged.

    version
        The version of the package that should be removed. Don't do anything if
        the package is installed with an unmatching version.

        .. important::
            As of version 2015.8.7, for distros which use yum/dnf, packages
            which have a version with a nonzero epoch (that is, versions which
            start with a number followed by a colon like in the example above)
            must have the epoch included when specifying the version number.
            For example:

            .. code-block:: yaml

                vim-enhanced:
                  pkg.purged:
                    - version: 2:7.4.160-1.el7

            In version 2015.8.9, an **ignore_epoch** argument has been added to
            :py:mod:`pkg.installed <salt.states.pkg.installed>`,
            :py:mod:`pkg.removed <salt.states.pkg.removed>`, and
            :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which
            causes the epoch to be disregarded when the state checks to see if
            the desired version was installed. If **ignore_epoch** was not set
            to ``True``, and instead of ``2:7.4.160-1.el7`` a version of
            ``7.4.160-1.el7`` were used, this state would report success since
            the actual installed version includes the epoch, and the specified
            version would not match.

    normalize : True
        Normalize the package name by removing the architecture, if the
        architecture of the package is different from the architecture of the
        operating system. The ability to disable this behavior is useful for
        poorly-created packages which include the architecture as an actual
        part of the name, such as kernel modules which match a specific kernel
        version.

        .. versionadded:: 2015.8.0

    ignore_epoch : False
        When a package version contains an non-zero epoch (e.g.
        ``1:3.14.159-2.el7``, and a specific version of a package is desired,
        set this option to ``True`` to ignore the epoch when comparing
        versions. This allows for the following SLS to be used:

        .. code-block:: yaml

            # Actual vim-enhanced version: 2:7.4.160-1.el7
            vim-enhanced:
              pkg.purged:
                - version: 7.4.160-1.el7
                - ignore_epoch: True

        Without this option set to ``True`` in the above example, the state
        would falsely report success since the actual installed version is
        ``2:7.4.160-1.el7``. Alternatively, this option can be left as
        ``False`` and the full version string (with epoch) can be specified in
        the SLS file:

        .. code-block:: yaml

            vim-enhanced:
              pkg.purged:
                - version: 2:7.4.160-1.el7

        .. versionadded:: 2015.8.9

    Multiple Package Options:

    pkgs
        A list of packages to purge. Must be passed as a python list. The
        ``name`` parameter will be ignored if this option is passed. It accepts
        version numbers as well.

        .. versionadded:: 0.16.0
    '''
    kwargs['saltenv'] = __env__
    try:
        return _uninstall(action='purge', name=name, version=version,
                          pkgs=pkgs, normalize=normalize,
                          ignore_epoch=ignore_epoch, **kwargs)
    except CommandExecutionError as exc:
        ret = {'name': name, 'result': False}
        if exc.info:
            # Get information for state return from the exception.
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = ('An error was encountered while purging '
                              'package(s): {0}'.format(exc))
        return ret


def uptodate(name, refresh=False, pkgs=None, **kwargs):
    '''
    .. versionadded:: 2014.7.0

    .. versionchanged:: 2018.3.0

        Added support for the ``pkgin`` provider.

    Verify that the system is completely up to date.

    name
        The name has no functional value and is only used as a tracking
        reference

    refresh
        refresh the package database before checking for new upgrades

    pkgs
        list of packages to upgrade

    :param str cache_valid_time:
        This parameter sets the value in seconds after which cache marked as
        invalid, and cache update is necessary. This overwrite ``refresh``
        parameter default behavior.

        In this case cache_valid_time is set, refresh will not take place for
        amount in seconds since last ``apt-get update`` executed on the system.

        .. note::

            This parameter available only on Debian based distributions, and
            have no effect on the rest.

    :param bool resolve_capabilities:
        Turn on resolving capabilities. This allow one to name "provides" or
        alias names for packages.

        .. versionadded:: 2018.3.0

    kwargs
        Any keyword arguments to pass through to ``pkg.upgrade``.

        .. versionadded:: 2015.5.0
    '''
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': 'Failed to update'}

    if 'pkg.list_upgrades' not in __salt__:
        ret['comment'] = 'State pkg.uptodate is not available'
        return ret

    # emerge --update doesn't appear to support repo notation
    if 'fromrepo' in kwargs and __grains__['os_family'] == 'Gentoo':
        ret['comment'] = '\'fromrepo\' argument not supported on this platform'
        return ret

    if isinstance(refresh, bool):
        pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs)
        try:
            packages = __salt__['pkg.list_upgrades'](refresh=refresh, **kwargs)
            # Map each upgradable package to its old/new version pair for the
            # test-mode changes dict.
            expected = {pkgname: {'new': pkgver,
                                  'old': __salt__['pkg.version'](pkgname,
                                                                 **kwargs)}
                        for pkgname, pkgver in six.iteritems(packages)}
            if isinstance(pkgs, list):
                # Restrict the upgrade to the explicitly requested packages.
                packages = [pkg for pkg in packages if pkg in pkgs]
                expected = {pkgname: pkgver
                            for pkgname, pkgver in six.iteritems(expected)
                            if pkgname in pkgs}
        except Exception as exc:
            ret['comment'] = six.text_type(exc)
            return ret
    else:
        ret['comment'] = 'refresh must be either True or False'
        return ret

    if not packages:
        ret['comment'] = 'System is already up-to-date'
        ret['result'] = True
        return ret
    elif __opts__['test']:
        ret['comment'] = 'System update will be performed'
        ret['changes'] = expected
        ret['result'] = None
        return ret

    try:
        ret['changes'] = __salt__['pkg.upgrade'](refresh=refresh, pkgs=pkgs, **kwargs)
    except CommandExecutionError as exc:
        if exc.info:
            # Get information for state return from the exception.
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while updating ' 'packages: {0}'.format(exc)) return ret # If a package list was provided, ensure those packages were updated missing = [] if isinstance(pkgs, list): missing = [pkg for pkg in six.iterkeys(expected) if pkg not in ret['changes']] if missing: ret['comment'] = 'The following package(s) failed to update: {0}'.format(', '.join(missing)) ret['result'] = False else: ret['comment'] = 'Upgrade ran successfully' ret['result'] = True return ret def group_installed(name, skip=None, include=None, **kwargs): ''' .. versionadded:: 2015.8.0 .. versionchanged:: 2016.11.0 Added support in :mod:`pacman <salt.modules.pacman>` Ensure that an entire package group is installed. This state is currently only supported for the :mod:`yum <salt.modules.yumpkg>` and :mod:`pacman <salt.modules.pacman>` package managers. skip Packages that would normally be installed by the package group ("default" packages), which should not be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - skip: - piranha include Packages which are included in a group, which would not normally be installed by a ``yum groupinstall`` ("optional" packages). Note that this will not enforce group membership; if you include packages which are not members of the specified groups, they will still be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - include: - haproxy .. versionchanged:: 2016.3.0 This option can no longer be passed as a comma-separated list, it must now be passed as a list (as shown in the above example). .. note:: Because this is essentially a wrapper around :py:func:`pkg.install <salt.modules.yumpkg.install>`, any argument which can be passed to pkg.install may also be included here, and it will be passed on to the call to :py:func:`pkg.install <salt.modules.yumpkg.install>`. 
''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} if 'pkg.group_diff' not in __salt__: ret['comment'] = 'pkg.group_install not available for this platform' return ret if skip is None: skip = [] else: if not isinstance(skip, list): ret['comment'] = 'skip must be formatted as a list' return ret for idx, item in enumerate(skip): if not isinstance(item, six.string_types): skip[idx] = six.text_type(item) if include is None: include = [] else: if not isinstance(include, list): ret['comment'] = 'include must be formatted as a list' return ret for idx, item in enumerate(include): if not isinstance(item, six.string_types): include[idx] = six.text_type(item) try: diff = __salt__['pkg.group_diff'](name) except CommandExecutionError as err: ret['comment'] = ('An error was encountered while installing/updating ' 'group \'{0}\': {1}.'.format(name, err)) return ret mandatory = diff['mandatory']['installed'] + \ diff['mandatory']['not installed'] invalid_skip = [x for x in mandatory if x in skip] if invalid_skip: ret['comment'] = ( 'The following mandatory packages cannot be skipped: {0}' .format(', '.join(invalid_skip)) ) return ret targets = diff['mandatory']['not installed'] targets.extend([x for x in diff['default']['not installed'] if x not in skip]) targets.extend(include) if not targets: ret['result'] = True ret['comment'] = 'Group \'{0}\' is already installed'.format(name) return ret partially_installed = diff['mandatory']['installed'] \ or diff['default']['installed'] \ or diff['optional']['installed'] if __opts__['test']: ret['result'] = None if partially_installed: ret['comment'] = ( 'Group \'{0}\' is partially installed and will be updated' .format(name) ) else: ret['comment'] = 'Group \'{0}\' will be installed'.format(name) return ret try: ret['changes'] = __salt__['pkg.install'](pkgs=targets, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while ' 'installing/updating group \'{0}\': {1}' .format(name, exc)) return ret failed = [x for x in targets if x not in __salt__['pkg.list_pkgs'](**kwargs)] if failed: ret['comment'] = ( 'Failed to install the following packages: {0}' .format(', '.join(failed)) ) return ret ret['result'] = True ret['comment'] = 'Group \'{0}\' was {1}'.format( name, 'updated' if partially_installed else 'installed' ) return ret def mod_init(low): ''' Set a flag to tell the install functions to refresh the package database. This ensures that the package database is refreshed only once during a state run significantly improving the speed of package management during a state run. It sets a flag for a number of reasons, primarily due to timeline logic. When originally setting up the mod_init for pkg a number of corner cases arose with different package managers and how they refresh package data. It also runs the "ex_mod_init" from the package manager module that is currently loaded. The "ex_mod_init" is expected to work as a normal "mod_init" function. .. 
seealso:: :py:func:`salt.modules.ebuild.ex_mod_init` ''' ret = True if 'pkg.ex_mod_init' in __salt__: ret = __salt__['pkg.ex_mod_init'](low) if low['fun'] == 'installed' or low['fun'] == 'latest': salt.utils.pkg.write_rtag(__opts__) return ret return False def mod_aggregate(low, chunks, running): ''' The mod_aggregate function which looks up all packages in the available low chunks and merges them into a single pkgs ref in the present low data ''' pkgs = [] pkg_type = None agg_enabled = [ 'installed', 'latest', 'removed', 'purged', ] if low.get('fun') not in agg_enabled: return low for chunk in chunks: tag = __utils__['state.gen_tag'](chunk) if tag in running: # Already ran the pkg state, skip aggregation continue if chunk.get('state') == 'pkg': if '__agg__' in chunk: continue # Check for the same function if chunk.get('fun') != low.get('fun'): continue # Check for the same repo if chunk.get('fromrepo') != low.get('fromrepo'): continue # Check first if 'sources' was passed so we don't aggregate pkgs # and sources together. if 'sources' in chunk: if pkg_type is None: pkg_type = 'sources' if pkg_type == 'sources': pkgs.extend(chunk['sources']) chunk['__agg__'] = True else: if pkg_type is None: pkg_type = 'pkgs' if pkg_type == 'pkgs': # Pull out the pkg names! if 'pkgs' in chunk: pkgs.extend(chunk['pkgs']) chunk['__agg__'] = True elif 'name' in chunk: version = chunk.pop('version', None) if version is not None: pkgs.append({chunk['name']: version}) else: pkgs.append(chunk['name']) chunk['__agg__'] = True if pkg_type is not None and pkgs: if pkg_type in low: low[pkg_type].extend(pkgs) else: low[pkg_type] = pkgs return low def mod_watch(name, **kwargs): ''' Install/reinstall a package based on a watch requisite .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered. 
''' sfun = kwargs.pop('sfun', None) mapfun = {'purged': purged, 'latest': latest, 'removed': removed, 'installed': installed} if sfun in mapfun: return mapfun[sfun](name, **kwargs) return {'name': name, 'changes': {}, 'comment': 'pkg.{0} does not work with the watch requisite'.format(sfun), 'result': False}
saltstack/salt
salt/states/pkg.py
_fulfills_version_string
python
def _fulfills_version_string(installed_versions, version_conditions_string, ignore_epoch=False, allow_updates=False):
    '''
    Return True if any of the installed versions satisfies every condition in
    ``version_conditions_string``, otherwise return False.

    installed_versions
        Iterable of currently-installed version strings.

    version_conditions_string
        The string containing all version conditions, comma-separated. E.g.::

            1.2.3-4
            >=1.2.3-4
            >=1.2.3-4, <2.3.4-5
            >=1.2.3-4, <2.3.4-5, !=1.2.4-1

    ignore_epoch : False
        When a package version contains a non-zero epoch (e.g.
        ``1:3.14.159-2.el7``) and a specific version of a package is desired,
        set this option to ``True`` to ignore the epoch when comparing
        versions.

    allow_updates : False
        Allow the package to be updated outside Salt's control (e.g. auto
        updates on Windows). This means a package on the minion can have a
        newer version than the latest available in the repository without
        enforcing a re-installation of the package. (Only applicable if only
        one strict version condition is specified, e.g.
        ``version: 2.0.6~ubuntu3``.)
    '''
    conditions = _parse_version_string(version_conditions_string)
    # allow_updates only relaxes a lone strict "==" condition into ">=",
    # so that a newer installed version still satisfies the state.
    relax_equality = allow_updates and len(conditions) == 1

    for candidate in installed_versions:
        satisfied = True
        for oper, wanted_version in conditions:
            if relax_equality and oper == '==':
                oper = '>='
            if not _fulfills_version_spec([candidate], oper, wanted_version,
                                          ignore_epoch=ignore_epoch):
                # One failed condition disqualifies this candidate; stop
                # checking the rest (matches the original short-circuit).
                satisfied = False
                break
        if satisfied:
            return True
    return False
Returns True if any of the installed versions match the specified version conditions, otherwise returns False. installed_versions The installed versions version_conditions_string The string containing all version conditions. E.G. 1.2.3-4 >=1.2.3-4 >=1.2.3-4, <2.3.4-5 >=1.2.3-4, <2.3.4-5, !=1.2.4-1 ignore_epoch : False When a package version contains a non-zero epoch (e.g. ``1:3.14.159-2.el7``), and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. allow_updates : False Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. (Only applicable if only one strict version condition is specified, e.g. version: 2.0.6~ubuntu3)
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pkg.py#L179-L215
[ "def _fulfills_version_spec(versions, oper, desired_version,\n ignore_epoch=False):\n '''\n Returns True if any of the installed versions match the specified version,\n otherwise returns False\n '''\n cmp_func = __salt__.get('pkg.version_cmp')\n # stripping \"with_origin\" dict wrapper\n if salt.utils.platform.is_freebsd():\n if isinstance(versions, dict) and 'version' in versions:\n versions = versions['version']\n for ver in versions:\n if (oper == '==' and fnmatch.fnmatch(ver, desired_version)) \\\n or salt.utils.versions.compare(ver1=ver,\n oper=oper,\n ver2=desired_version,\n cmp_func=cmp_func,\n ignore_epoch=ignore_epoch):\n return True\n return False\n", "def _parse_version_string(version_conditions_string):\n '''\n Returns a list of two-tuples containing (operator, version).\n '''\n result = []\n version_conditions_string = version_conditions_string.strip()\n if not version_conditions_string:\n return result\n for version_condition in version_conditions_string.split(','):\n operator_and_version = _get_comparison_spec(version_condition)\n result.append(operator_and_version)\n return result\n" ]
# -*- coding: utf-8 -*- ''' Installation of packages using OS package managers such as yum or apt-get ========================================================================= .. note:: On minions running systemd>=205, as of version 2015.8.12, 2016.3.3, and 2016.11.0, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing the package manager commands spawned by Salt, when Salt updates itself (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.use_scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Salt can manage software packages via the pkg state module, packages can be set up to be installed, latest, removed and purged. Package management declarations are typically rather simple: .. code-block:: yaml vim: pkg.installed A more involved example involves pulling from a custom repository. .. code-block:: yaml base: pkgrepo.managed: - humanname: Logstash PPA - name: ppa:wolfnet/logstash - dist: precise - file: /etc/apt/sources.list.d/logstash.list - keyid: 28B04E4A - keyserver: keyserver.ubuntu.com logstash: pkg.installed: - fromrepo: ppa:wolfnet/logstash Multiple packages can also be installed with the use of the pkgs state module .. code-block:: yaml dotdeb.repo: pkgrepo.managed: - humanname: Dotdeb - name: deb http://packages.dotdeb.org wheezy-php55 all - dist: wheezy-php55 - file: /etc/apt/sources.list.d/dotbeb.list - keyid: 89DF5277 - keyserver: keys.gnupg.net - refresh_db: true php.packages: pkg.installed: - fromrepo: wheezy-php55 - pkgs: - php5-fpm - php5-cli - php5-curl .. 
warning:: Package names are currently case-sensitive. If the minion is using a package manager which is not case-sensitive (such as :mod:`pkgng <salt.modules.pkgng>`), then this state will fail if the proper case is not used. This will be addressed in a future release of Salt. ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import fnmatch import logging import os import re # Import Salt libs import salt.utils.pkg import salt.utils.platform import salt.utils.versions from salt.output import nested from salt.utils.functools import namespaced_function as _namespaced_function from salt.utils.odict import OrderedDict as _OrderedDict from salt.exceptions import ( CommandExecutionError, MinionError, SaltInvocationError ) from salt.modules.pkg_resource import _repack_pkgs # Import 3rd-party libs from salt.ext import six # pylint: disable=invalid-name _repack_pkgs = _namespaced_function(_repack_pkgs, globals()) if salt.utils.platform.is_windows(): # pylint: disable=import-error,no-name-in-module,unused-import from salt.ext.six.moves.urllib.parse import urlparse as _urlparse from salt.exceptions import SaltRenderError import collections import datetime import errno import time from functools import cmp_to_key # pylint: disable=import-error # pylint: enable=unused-import from salt.modules.win_pkg import _get_package_info from salt.modules.win_pkg import get_repo_data from salt.modules.win_pkg import _get_repo_details from salt.modules.win_pkg import _refresh_db_conditional from salt.modules.win_pkg import refresh_db from salt.modules.win_pkg import genrepo from salt.modules.win_pkg import _repo_process_pkg_sls from salt.modules.win_pkg import _get_latest_pkg_version from salt.modules.win_pkg import _reverse_cmp_pkg_versions _get_package_info = _namespaced_function(_get_package_info, globals()) get_repo_data = _namespaced_function(get_repo_data, globals()) _get_repo_details = \ _namespaced_function(_get_repo_details, globals()) 
_refresh_db_conditional = \ _namespaced_function(_refresh_db_conditional, globals()) refresh_db = _namespaced_function(refresh_db, globals()) genrepo = _namespaced_function(genrepo, globals()) _repo_process_pkg_sls = \ _namespaced_function(_repo_process_pkg_sls, globals()) _get_latest_pkg_version = \ _namespaced_function(_get_latest_pkg_version, globals()) _reverse_cmp_pkg_versions = \ _namespaced_function(_reverse_cmp_pkg_versions, globals()) # The following imports are used by the namespaced win_pkg funcs # and need to be included in their globals. # pylint: disable=import-error,unused-import from salt.utils.versions import LooseVersion # pylint: enable=import-error,unused-import # pylint: enable=invalid-name log = logging.getLogger(__name__) def __virtual__(): ''' Only make these states available if a pkg provider has been detected or assigned for this minion ''' return 'pkg.install' in __salt__ def _get_comparison_spec(pkgver): ''' Return a tuple containing the comparison operator and the version. If no comparison operator was passed, the comparison is assumed to be an "equals" comparison, and "==" will be the operator returned. ''' oper, verstr = salt.utils.pkg.split_comparison(pkgver.strip()) if oper in ('=', ''): oper = '==' return oper, verstr def _parse_version_string(version_conditions_string): ''' Returns a list of two-tuples containing (operator, version). 
''' result = [] version_conditions_string = version_conditions_string.strip() if not version_conditions_string: return result for version_condition in version_conditions_string.split(','): operator_and_version = _get_comparison_spec(version_condition) result.append(operator_and_version) return result def _fulfills_version_spec(versions, oper, desired_version, ignore_epoch=False): ''' Returns True if any of the installed versions match the specified version, otherwise returns False ''' cmp_func = __salt__.get('pkg.version_cmp') # stripping "with_origin" dict wrapper if salt.utils.platform.is_freebsd(): if isinstance(versions, dict) and 'version' in versions: versions = versions['version'] for ver in versions: if (oper == '==' and fnmatch.fnmatch(ver, desired_version)) \ or salt.utils.versions.compare(ver1=ver, oper=oper, ver2=desired_version, cmp_func=cmp_func, ignore_epoch=ignore_epoch): return True return False def _find_unpurge_targets(desired, **kwargs): ''' Find packages which are marked to be purged but can't yet be removed because they are dependencies for other installed packages. These are the packages which will need to be 'unpurged' because they are part of pkg.installed states. This really just applies to Debian-based Linuxes. ''' return [ x for x in desired if x in __salt__['pkg.list_pkgs'](purge_desired=True, **kwargs) ] def _find_download_targets(name=None, version=None, pkgs=None, normalize=True, skip_suggestions=False, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.downloaded and discover what packages need to be downloaded. Return a dict of packages to download. ''' cur_pkgs = __salt__['pkg.list_downloaded'](**kwargs) if pkgs: to_download = _repack_pkgs(pkgs, normalize=normalize) if not to_download: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. 
See ' 'minion log.'} else: if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_download = {_normalize_name(name): version} else: to_download = {name: version} cver = cur_pkgs.get(name, {}) if name in to_download: # Package already downloaded, no need to download again if cver and version in cver: return {'name': name, 'changes': {}, 'result': True, 'comment': 'Version {0} of package \'{1}\' is already ' 'downloaded'.format(version, name)} # if cver is not an empty string, the package is already downloaded elif cver and version is None: # The package is downloaded return {'name': name, 'changes': {}, 'result': True, 'comment': 'Package {0} is already ' 'downloaded'.format(name)} version_spec = False if not skip_suggestions: try: problems = _preflight_check(to_download, **kwargs) except CommandExecutionError: pass else: comments = [] if problems.get('no_suggest'): comments.append( 'The following package(s) were not found, and no ' 'possible matches were found in the package db: ' '{0}'.format( ', '.join(sorted(problems['no_suggest'])) ) ) if problems.get('suggest'): for pkgname, suggestions in \ six.iteritems(problems['suggest']): comments.append( 'Package \'{0}\' not found (possible matches: ' '{1})'.format(pkgname, ', '.join(suggestions)) ) if comments: if len(comments) > 1: comments.append('') return {'name': name, 'changes': {}, 'result': False, 'comment': '. 
'.join(comments).rstrip()} # Find out which packages will be targeted in the call to pkg.download # Check current downloaded versions against specified versions targets = {} problems = [] for pkgname, pkgver in six.iteritems(to_download): cver = cur_pkgs.get(pkgname, {}) # Package not yet downloaded, so add to targets if not cver: targets[pkgname] = pkgver continue # No version specified but package is already downloaded elif cver and not pkgver: continue version_spec = True try: if not _fulfills_version_string(cver.keys(), pkgver, ignore_epoch=ignore_epoch): targets[pkgname] = pkgver except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already downloaded msg = ( 'All specified packages{0} are already downloaded' .format(' (matching specified versions)' if version_spec else '') ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_advisory_targets(name=None, advisory_ids=None, **kwargs): ''' Inspect the arguments to pkg.patch_installed and discover what advisory patches need to be installed. Return a dict of advisory patches to install. 
''' cur_patches = __salt__['pkg.list_installed_patches'](**kwargs) if advisory_ids: to_download = advisory_ids else: to_download = [name] if cur_patches.get(name, {}): # Advisory patch already installed, no need to install it again return {'name': name, 'changes': {}, 'result': True, 'comment': 'Advisory patch {0} is already ' 'installed'.format(name)} # Find out which advisory patches will be targeted in the call to pkg.install targets = [] for patch_name in to_download: cver = cur_patches.get(patch_name, {}) # Advisory patch not yet installed, so add to targets if not cver: targets.append(patch_name) continue if not targets: # All specified packages are already downloaded msg = ('All specified advisory patches are already installed') return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_remove_targets(name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.removed and discover what packages need to be removed. Return a dict of packages to remove. ''' if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if pkgs: to_remove = _repack_pkgs(pkgs, normalize=normalize) if not to_remove: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. 
See ' 'minion log.'} else: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_remove = {_normalize_name(name): version} version_spec = False # Find out which packages will be targeted in the call to pkg.remove # Check current versions against specified versions targets = [] problems = [] for pkgname, pkgver in six.iteritems(to_remove): # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names origin = bool(re.search('/', pkgname)) if __grains__['os'] == 'FreeBSD' and origin: cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == pkgname] else: cver = cur_pkgs.get(pkgname, []) # Package not installed, no need to remove if not cver: continue # No version specified and pkg is installed elif __salt__['pkg_resource.version_clean'](pkgver) is None: targets.append(pkgname) continue version_spec = True try: if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch): targets.append(pkgname) else: log.debug( 'Current version (%s) did not match desired version ' 'specification (%s), will not remove', cver, pkgver ) except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already absent msg = 'All specified packages{0} are already absent'.format( ' (matching specified versions)' if version_spec else '' ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_install_targets(name=None, version=None, pkgs=None, sources=None, skip_suggestions=False, pkg_verify=False, normalize=True, ignore_epoch=False, reinstall=False, refresh=False, **kwargs): ''' Inspect the arguments to pkg.installed and discover what packages need to be installed. 
Return a dict of desired packages ''' was_refreshed = False if all((pkgs, sources)): return {'name': name, 'changes': {}, 'result': False, 'comment': 'Only one of "pkgs" and "sources" is permitted.'} # dict for packages that fail pkg.verify and their altered files altered_files = {} # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True if salt.utils.platform.is_windows(): # Windows requires a refresh to establish a pkg db if refresh=True, so # add it to the kwargs. 
kwargs['refresh'] = refresh resolve_capabilities = kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__ try: cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) cur_prov = resolve_capabilities and __salt__['pkg.list_provides'](**kwargs) or dict() except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': exc.strerror} if salt.utils.platform.is_windows() and kwargs.pop('refresh', False): # We already refreshed when we called pkg.list_pkgs was_refreshed = True refresh = False if any((pkgs, sources)): if pkgs: desired = _repack_pkgs(pkgs, normalize=normalize) elif sources: desired = __salt__['pkg_resource.pack_sources']( sources, normalize=normalize, ) if not desired: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted \'{0}\' parameter. See ' 'minion log.'.format('pkgs' if pkgs else 'sources')} to_unpurge = _find_unpurge_targets(desired, **kwargs) else: if salt.utils.platform.is_windows(): pkginfo = _get_package_info(name, saltenv=kwargs['saltenv']) if not pkginfo: return {'name': name, 'changes': {}, 'result': False, 'comment': 'Package {0} not found in the ' 'repository.'.format(name)} if version is None: version = _get_latest_pkg_version(pkginfo) if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) desired = {_normalize_name(name): version} else: desired = {name: version} to_unpurge = _find_unpurge_targets(desired, **kwargs) # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names origin = bool(re.search('/', name)) if __grains__['os'] == 'FreeBSD' and origin: cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == name] else: cver = cur_pkgs.get(name, []) if name not in to_unpurge: if version and version in cver \ and not reinstall \ and not pkg_verify: # The package is installed and is the correct version return {'name': name, 'changes': {}, 'result': True, 
'comment': 'Version {0} of package \'{1}\' is already ' 'installed'.format(version, name)} # if cver is not an empty string, the package is already installed elif cver and version is None \ and not reinstall \ and not pkg_verify: # The package is installed return {'name': name, 'changes': {}, 'result': True, 'comment': 'Package {0} is already ' 'installed'.format(name)} version_spec = False if not sources: # Check for alternate package names if strict processing is not # enforced. Takes extra time. Disable for improved performance if not skip_suggestions: # Perform platform-specific pre-flight checks not_installed = dict([ (name, version) for name, version in desired.items() if not (name in cur_pkgs and (version is None or _fulfills_version_string(cur_pkgs[name], version))) ]) if not_installed: try: problems = _preflight_check(not_installed, **kwargs) except CommandExecutionError: pass else: comments = [] if problems.get('no_suggest'): comments.append( 'The following package(s) were not found, and no ' 'possible matches were found in the package db: ' '{0}'.format( ', '.join(sorted(problems['no_suggest'])) ) ) if problems.get('suggest'): for pkgname, suggestions in \ six.iteritems(problems['suggest']): comments.append( 'Package \'{0}\' not found (possible matches: ' '{1})'.format(pkgname, ', '.join(suggestions)) ) if comments: if len(comments) > 1: comments.append('') return {'name': name, 'changes': {}, 'result': False, 'comment': '. '.join(comments).rstrip()} # Resolve the latest package version for any packages with "latest" in the # package version wants_latest = [] \ if sources \ else [x for x, y in six.iteritems(desired) if y == 'latest'] if wants_latest: resolved_latest = __salt__['pkg.latest_version'](*wants_latest, refresh=refresh, **kwargs) if len(wants_latest) == 1: resolved_latest = {wants_latest[0]: resolved_latest} if refresh: was_refreshed = True refresh = False # pkg.latest_version returns an empty string when the package is # up-to-date. 
So check the currently-installed packages. If found, the # resolved latest version will be the currently installed one from # cur_pkgs. If not found, then the package doesn't exist and the # resolved latest version will be None. for key in resolved_latest: if not resolved_latest[key]: if key in cur_pkgs: resolved_latest[key] = cur_pkgs[key][-1] else: resolved_latest[key] = None # Update the desired versions with the ones we resolved desired.update(resolved_latest) # Find out which packages will be targeted in the call to pkg.install targets = {} to_reinstall = {} problems = [] warnings = [] failed_verify = False for package_name, version_string in six.iteritems(desired): cver = cur_pkgs.get(package_name, []) if resolve_capabilities and not cver and package_name in cur_prov: cver = cur_pkgs.get(cur_prov.get(package_name)[0], []) # Package not yet installed, so add to targets if not cver: targets[package_name] = version_string continue if sources: if reinstall: to_reinstall[package_name] = version_string continue elif 'lowpkg.bin_pkg_info' not in __salt__: continue # Metadata parser is available, cache the file and derive the # package's name and version err = 'Unable to cache {0}: {1}' try: cached_path = __salt__['cp.cache_file'](version_string, saltenv=kwargs['saltenv']) except CommandExecutionError as exc: problems.append(err.format(version_string, exc)) continue if not cached_path: problems.append(err.format(version_string, 'file not found')) continue elif not os.path.exists(cached_path): problems.append('{0} does not exist on minion'.format(version_string)) continue source_info = __salt__['lowpkg.bin_pkg_info'](cached_path) if source_info is None: warnings.append('Failed to parse metadata for {0}'.format(version_string)) continue else: verstr = source_info['version'] else: verstr = version_string if reinstall: to_reinstall[package_name] = version_string continue if not __salt__['pkg_resource.check_extra_requirements'](package_name, version_string): 
targets[package_name] = version_string continue # No version specified and pkg is installed elif __salt__['pkg_resource.version_clean'](version_string) is None: if (not reinstall) and pkg_verify: try: verify_result = __salt__['pkg.verify']( package_name, ignore_types=ignore_types, verify_options=verify_options, **kwargs ) except (CommandExecutionError, SaltInvocationError) as exc: failed_verify = exc.strerror continue if verify_result: to_reinstall[package_name] = version_string altered_files[package_name] = verify_result continue version_fulfilled = False allow_updates = bool(not sources and kwargs.get('allow_updates')) try: version_fulfilled = _fulfills_version_string(cver, verstr, ignore_epoch=ignore_epoch, allow_updates=allow_updates) except CommandExecutionError as exc: problems.append(exc.strerror) continue # Compare desired version against installed version. version_spec = True if not version_fulfilled: if reinstall: to_reinstall[package_name] = version_string else: version_conditions = _parse_version_string(version_string) if pkg_verify and any(oper == '==' for oper, version in version_conditions): try: verify_result = __salt__['pkg.verify']( package_name, ignore_types=ignore_types, verify_options=verify_options, **kwargs ) except (CommandExecutionError, SaltInvocationError) as exc: failed_verify = exc.strerror continue if verify_result: to_reinstall[package_name] = version_string altered_files[package_name] = verify_result else: log.debug( 'Current version (%s) did not match desired version ' 'specification (%s), adding to installation targets', cver, version_string ) targets[package_name] = version_string if failed_verify: problems.append(failed_verify) if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not any((targets, to_unpurge, to_reinstall)): # All specified packages are installed msg = 'All specified packages are already installed{0}' msg = msg.format( ' and are at the desired version' if 
            version_spec and not sources else ''
        )
        ret = {'name': name,
               'changes': {},
               'result': True,
               'comment': msg}
        if warnings:
            ret.setdefault('warnings', []).extend(warnings)
        return ret

    return (desired, targets, to_unpurge, to_reinstall, altered_files,
            warnings, was_refreshed)


def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None):
    '''
    Determine whether or not the installed packages match what was requested in
    the SLS file.

    desired
        Dict mapping each requested package name to the requested version
        string (may be None or ``latest`` when no exact version was asked
        for).

    new_pkgs
        Dict of the packages now installed (``pkg.list_pkgs`` output with
        ``versions_as_list=True``), checked against ``desired``.

    ignore_epoch : False
        When True, the epoch is disregarded when comparing version strings.

    new_caps
        Optional dict mapping capability names to the package(s) providing
        them; consulted when a requested name is not itself installed.

    Returns a two-element tuple of lists: ``(ok, failed)``.
    '''
    ok = []
    failed = []
    if not new_caps:
        new_caps = dict()
    for pkgname, pkgver in desired.items():
        # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names.
        # Homebrew for Mac OSX does something similar with tap names
        # prefixing package names, separated with a slash.
        has_origin = '/' in pkgname
        if __grains__['os'] == 'FreeBSD' and has_origin:
            # On FreeBSD the requested name may be a port origin; match on
            # the 'origin' field of each installed package instead.
            cver = [k for k, v in six.iteritems(new_pkgs)
                    if v['origin'] == pkgname]
        elif __grains__['os'] == 'MacOS' and has_origin:
            # Try the full tap-prefixed name first, then fall back to the
            # bare package name after the slash.
            cver = new_pkgs.get(pkgname, new_pkgs.get(pkgname.split('/')[-1]))
        elif __grains__['os'] == 'OpenBSD':
            # Strip any OpenBSD flavor suffix (text after '%') before lookup
            cver = new_pkgs.get(pkgname.split('%')[0])
        elif __grains__['os_family'] == 'Debian':
            # Strip any '=version' suffix from the requested name
            cver = new_pkgs.get(pkgname.split('=')[0])
        else:
            cver = new_pkgs.get(pkgname)
        if not cver and pkgname in new_caps:
            # Not installed under its own name; check the package that
            # provides the requested capability instead.
            cver = new_pkgs.get(new_caps.get(pkgname)[0])

        if not cver:
            # Package is not installed at all
            failed.append(pkgname)
            continue
        elif pkgver == 'latest':
            # 'latest' is satisfied by any installed version
            ok.append(pkgname)
            continue
        elif not __salt__['pkg_resource.version_clean'](pkgver):
            # No concrete version was requested, so presence is enough
            ok.append(pkgname)
            continue
        elif pkgver.endswith("*") and cver[0].startswith(pkgver[:-1]):
            # Wildcard version satisfied by a simple prefix match against
            # the first installed version.
            ok.append(pkgname)
            continue
        if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch):
            ok.append(pkgname)
        else:
            failed.append(pkgname)
    return ok, failed


def _get_desired_pkg(name, desired):
    '''
    Helper function that retrieves and nicely formats the desired pkg (and
    version if specified) so that helpful information can be printed in the
    comment for the state.
    '''
    if not desired[name] or desired[name].startswith(('<', '>', '=')):
        # Either no version was requested or a comparison operator is
        # already embedded in the version string, so no '=' is added.
        oper = ''
    else:
        oper = '='
    return '{0}{1}{2}'.format(name, oper,
                              '' if not desired[name] else desired[name])


def _preflight_check(desired, fromrepo, **kwargs):
    '''
    Perform platform-specific checks on desired packages

    Returns a dict with two keys: ``suggest`` (map of package name ->
    suggested alternatives) and ``no_suggest`` (list of package names that
    were not found and have no suggestions). Returns an empty dict when the
    platform's pkg module does not implement ``pkg.check_db``.
    '''
    if 'pkg.check_db' not in __salt__:
        # This platform cannot validate packages against a package db
        return {}
    ret = {'suggest': {}, 'no_suggest': []}
    pkginfo = __salt__['pkg.check_db'](
        *list(desired.keys()), fromrepo=fromrepo, **kwargs
    )
    for pkgname in pkginfo:
        if pkginfo[pkgname]['found'] is False:
            # Package was not located in the package db
            if pkginfo[pkgname]['suggestions']:
                ret['suggest'][pkgname] = pkginfo[pkgname]['suggestions']
            else:
                ret['no_suggest'].append(pkgname)
    return ret


def _nested_output(obj):
    '''
    Serialize obj and format for output
    '''
    # The nested outputter needs the minion opts before it can render
    nested.__opts__ = __opts__
    ret = nested.output(obj).rstrip()
    return ret


def _resolve_capabilities(pkgs, refresh=False, **kwargs):
    '''
    Resolve capabilities in ``pkgs`` and exchange them with real package
    names, when the result is distinct.

    This feature can be turned on while setting the parameter
    ``resolve_capabilities`` to True.

    Return the input dictionary with replaced capability names and, as a
    second return value, a bool which says if a refresh needs to be run.

    In case ``resolve_capabilities`` is False (disabled) or not supported
    by the implementation the input is returned unchanged.
    '''
    if not pkgs or 'pkg.resolve_capabilities' not in __salt__:
        return pkgs, refresh
    ret = __salt__['pkg.resolve_capabilities'](pkgs, refresh=refresh, **kwargs)
    # pkg.resolve_capabilities has already honored the refresh request, so
    # the caller does not need to refresh again.
    return ret, False


def installed(
        name,
        version=None,
        refresh=None,
        fromrepo=None,
        skip_verify=False,
        skip_suggestions=False,
        pkgs=None,
        sources=None,
        allow_updates=False,
        pkg_verify=False,
        normalize=True,
        ignore_epoch=False,
        reinstall=False,
        update_holds=False,
        bypass_file=None,
        bypass_file_contains=None,
        **kwargs):
    '''
    Ensure that the package is installed, and that it is the correct version
    (if specified).

    :param str name:
        The name of the package to be installed.
This parameter is ignored if either "pkgs" or "sources" is used. Additionally, please note that this option can only be used to install packages from a software repository. To install a package file manually, use the "sources" option detailed below. :param str version: Install a specific version of a package. This option is ignored if "sources" is used. Currently, this option is supported for the following pkg providers: :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`pkgin <salt.modules.pkgin>`, :mod:`win_pkg <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`. The version number includes the release designation where applicable, to allow Salt to target a specific release of a given version. When in doubt, using the ``pkg.latest_version`` function for an uninstalled package will tell you the version available. .. code-block:: bash # salt myminion pkg.latest_version vim-enhanced myminion: 2:7.4.160-1.el7 .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the ``pkg.latest_version`` output above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. 
Also, while this function is not yet implemented for all pkg frontends, :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` will show all versions available in the various repositories for a given package, irrespective of whether or not it is installed. .. code-block:: bash # salt myminion pkg.list_repo_pkgs bash myminion: ---------- bash: - 4.2.46-21.el7_3 - 4.2.46-20.el7_2 This function was first added for :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` in 2014.1.0, and was expanded to :py:func:`Debian/Ubuntu <salt.modules.aptpkg.list_repo_pkgs>` and :py:func:`Arch Linux <salt.modules.pacman.list_repo_pkgs>`-based distros in the 2017.7.0 release. The version strings returned by either of these functions can be used as version specifiers in pkg states. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.installed: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 If the version given is the string ``latest``, the latest available package version will be installed à la ``pkg.latest``. **WILDCARD VERSIONS** As of the 2017.7.0 release, this state now supports wildcards in package versions for SUSE SLES/Leap/Tumbleweed, Debian/Ubuntu, RHEL/CentOS, Arch Linux, and their derivatives. Using wildcards can be useful for packages where the release name is built into the version in some way, such as for RHEL/CentOS which typically has version numbers like ``1.2.34-5.el7``. An example of the usage for this would be: .. code-block:: yaml mypkg: pkg.installed: - version: '1.2.34*' Keep in mind that using wildcard versions will result in a slower state run since Salt must gather the available versions of the specified packages and figure out which of them match the specified wildcard expression. :param bool refresh: This parameter controls whether or not the package repo database is updated prior to installing the requested package(s). 
If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before installing. If ``False``, the package database will *not* be refreshed before installing. If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - cache_valid_time: 300 - allow_updates: True - hold: False In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param str fromrepo: Specify a repository from which to install .. note:: Distros which use APT (Debian, Ubuntu, etc.) do not have a concept of repositories, in the same way as YUM-based distros do. When a source is added, it is assigned to a given release. Consider the following source configuration: .. code-block:: text deb http://ppa.launchpad.net/saltstack/salt/ubuntu precise main The packages provided by this source would be made available via the ``precise`` release, therefore ``fromrepo`` would need to be set to ``precise`` for Salt to install the package from this source. 
Having multiple sources in the same release may result in the default install candidate being newer than what is desired. If this is the case, the desired version must be specified using the ``version`` parameter. If the ``pkgs`` parameter is being used to install multiple packages in the same state, then instead of using ``version``, use the method of version specification described in the **Multiple Package Installation Options** section below. Running the shell command ``apt-cache policy pkgname`` on a minion can help elucidate the APT configuration and aid in properly configuring states: .. code-block:: bash root@saltmaster:~# salt ubuntu01 cmd.run 'apt-cache policy ffmpeg' ubuntu01: ffmpeg: Installed: (none) Candidate: 7:0.10.11-1~precise1 Version table: 7:0.10.11-1~precise1 0 500 http://ppa.launchpad.net/jon-severinsson/ffmpeg/ubuntu/ precise/main amd64 Packages 4:0.8.10-0ubuntu0.12.04.1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise-updates/main amd64 Packages 500 http://security.ubuntu.com/ubuntu/ precise-security/main amd64 Packages 4:0.8.1-0ubuntu1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise/main amd64 Packages The release is located directly after the source's URL. The actual release name is the part before the slash, so to install version **4:0.8.10-0ubuntu0.12.04.1** either ``precise-updates`` or ``precise-security`` could be used for the ``fromrepo`` value. :param bool skip_verify: Skip the GPG verification check for the package to be installed :param bool skip_suggestions: Force strict package naming. Disables lookup of package alternatives. .. versionadded:: 2014.1.1 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 :param bool allow_updates: Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). 
This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - allow_updates: True - hold: False :param bool pkg_verify: .. versionadded:: 2014.7.0 For requested packages that are already installed and would not be targeted for upgrade or downgrade, use pkg.verify to determine if any of the files installed by the package have been altered. If files have been altered, the reinstall option of pkg.install is used to force a reinstall. Types to ignore can be passed to pkg.verify. Additionally, ``verify_options`` can be used to modify further the behavior of pkg.verify. See examples below. Currently, this option is supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>`. Examples: .. code-block:: yaml httpd: pkg.installed: - version: 2.2.15-30.el6.centos - pkg_verify: True .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc - verify_options: - nodeps - nofiledigest :param list ignore_types: List of types to ignore when verifying the package .. versionadded:: 2014.7.0 :param list verify_options: List of additional options to pass when verifying the package. These options will be added to the ``rpm -V`` command, prepended with ``--`` (for example, when ``nodeps`` is passed in this option, ``rpm -V`` will be run with ``--nodeps``). .. versionadded:: 2016.11.0 :param bool normalize: Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml gpfs.gplbin-2.6.32-279.31.1.el6.x86_64: pkg.installed: - normalize: False :param bool ignore_epoch: When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.installed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the package would be installed, but the state would report as failed because the actual installed version would be ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 | **MULTIPLE PACKAGE INSTALLATION OPTIONS: (not supported in pkgng)** :param list pkgs: A list of packages to install from a software repository. All packages listed under ``pkgs`` will be installed via a single command. .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar - baz - hold: True ``NOTE:`` For :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`winrepo <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`, version numbers can be specified in the ``pkgs`` argument. For example: .. 
code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz Additionally, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`zypper <salt.modules.zypper>`, :mod:`yum/dnf <salt.modules.yumpkg>`, and :mod:`apt <salt.modules.aptpkg>` support the ``<``, ``<=``, ``>=``, and ``>`` operators for more control over what versions will be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: '>=1.2.3-4' - baz ``NOTE:`` When using comparison operators, the expression must be enclosed in quotes to avoid a YAML render error. With :mod:`ebuild <salt.modules.ebuild>` is also possible to specify a use flag list and/or if the given packages should be in package.accept_keywords file and/or the overlay from which you want the package to be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo: '~' - bar: '~>=1.2:slot::overlay[use,-otheruse]' - baz :param list sources: A list of packages to install, along with the source URI or local path from which to install each package. In the example below, ``foo``, ``bar``, ``baz``, etc. refer to the name of the package, as it would appear in the output of the ``pkg.version`` or ``pkg.list_pkgs`` salt CLI commands. .. code-block:: yaml mypkgs: pkg.installed: - sources: - foo: salt://rpms/foo.rpm - bar: http://somesite.org/bar.rpm - baz: ftp://someothersite.org/baz.rpm - qux: /minion/path/to/qux.rpm **PLATFORM-SPECIFIC ARGUMENTS** These are specific to each OS. If it does not apply to the execution module for your OS, it is ignored. :param bool hold: Force the package to be held at the current installed version. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2014.7.0 :param bool update_holds: If ``True``, and this function would update the package version, any packages which are being held will be temporarily unheld so that they can be updated. 
Otherwise, if this function attempts to update a held package, the held package(s) will be skipped and the state will fail. By default, this parameter is set to ``False``. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2016.11.0 :param list names: A list of packages to install from a software repository. Each package will be installed individually by the package manager. .. warning:: Unlike ``pkgs``, the ``names`` parameter cannot specify a version. In addition, it makes a separate call to the package management frontend to install each package, whereas ``pkgs`` makes just a single call. It is therefore recommended to use ``pkgs`` instead of ``names`` to install multiple packages, both for the additional features and the performance improvement that it brings. :param bool install_recommends: Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - install_recommends: False :param bool only_upgrade: Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. :param bool report_reboot_exit_codes: If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. 
code-block:: yaml ms vcpp installed: pkg.installed: - name: ms-vcpp - version: 10.0.40219 - report_reboot_exit_codes: False :param str bypass_file: If you wish to bypass the full package validation process, you can specify a file related to the installed pacakge as a way to validate the pacakge has already been installed. A good example would be a config file that is deployed with the package. Another bypass_file could be ``/run/salt-minon.pid``. .. code-block:: yaml install_ntp: pkg.installed: - name: ntp - bypass_file: /etc/ntp.conf The use case for this feature is when running salt at significant scale. Each state that has a requisite for a ``pkg.installed`` will have salt querying the package manager of the system. Compared to simple diff checks, querying the pacakge manager is a lengthy process. This feature is an attempt to reduce the run time of states. If only a config change is being made but you wish to keep all of the self resolving requisites this bypasses the lenghty cost of the package manager. The assumption is that if this file is present, the package should already be installed. :param str bypass_file_contains: This option can only be used in conjunction with the ``bypass_file`` option. It is to provide a second layer of validation before bypassing the ``pkg.installed`` process. .. code-block:: yaml install_ntp: pkg.installed: - name: ntp - bypass_file: /etc/ntp.conf - bypass_file_contains: version-20181218 The will have salt check to see if the file contains the specified string. If the value is found, the ``pkg.installed`` process will be bypassed under the assumption that two pieces of validation have passed and the package is already installed. .. warning:: Do not try and use ``{{ salt['pkg.version']('ntp') }}`` in a jinja template as part of your bypass_file_contains match. This will trigger a ``pkg.version`` lookup with the pacakge manager and negate any time saved by trying to use the bypass feature. 
:return: A dictionary containing the state of the software installation :rtype dict: .. note:: The ``pkg.installed`` state supports the usage of ``reload_modules``. This functionality allows you to force Salt to reload all modules. In many cases, Salt is clever enough to transparently reload the modules. For example, if you install a package, Salt reloads modules because some other module or state might require the package which was installed. However, there are some edge cases where this may not be the case, which is what ``reload_modules`` is meant to resolve. You should only use ``reload_modules`` if your ``pkg.installed`` does some sort of installation where if you do not reload the modules future items in your state which rely on the software being installed will fail. Please see the :ref:`Reloading Modules <reloading-modules>` documentation for more information. ''' if not pkgs and isinstance(pkgs, list): return {'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided'} # If just a name (and optionally a version) is passed, just pack them into # the pkgs argument. 
if name and not any((pkgs, sources)): if version: pkgs = [{name: version}] version = None else: pkgs = [name] kwargs['saltenv'] = __env__ refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if bypass_file is not None and bypass_file_contains is not None: if os.path.isfile(bypass_file): with salt.utils.fopen(bypass_file) as bypass_file_open: if bypass_file_contains in bypass_file_open.read(): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as {} was present in {}'.format(bypass_file_contains, bypass_file)} if bypass_file is not None and bypass_file_contains is None: if os.path.isfile(bypass_file): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as bypass_file {} was present'.format(bypass_file)} # check if capabilities should be checked and modify the requested packages # accordingly. if pkgs: pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) if not isinstance(pkg_verify, list): pkg_verify = pkg_verify is True if (pkg_verify or isinstance(pkg_verify, list)) \ and 'pkg.verify' not in __salt__: return {'name': name, 'changes': {}, 'result': False, 'comment': 'pkg.verify not implemented'} if not isinstance(version, six.string_types) and version is not None: version = six.text_type(version) kwargs['allow_updates'] = allow_updates result = _find_install_targets(name, version, pkgs, sources, fromrepo=fromrepo, skip_suggestions=skip_suggestions, pkg_verify=pkg_verify, normalize=normalize, ignore_epoch=ignore_epoch, reinstall=reinstall, refresh=refresh, **kwargs) try: (desired, targets, to_unpurge, to_reinstall, altered_files, warnings, was_refreshed) = result if was_refreshed: refresh = False except ValueError: # _find_install_targets() found no targets or encountered an error # check that the hold function is available if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( 
name=name, pkgs=pkgs, sources=sources ) except (CommandExecutionError, SaltInvocationError) as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': six.text_type(exc)} if 'result' in hold_ret and not hold_ret['result']: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] for i in modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] result['changes'][i['name']] = i['changes'] for i in not_modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] for i in failed_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] return result if to_unpurge and 'lowpkg.unpurge' not in __salt__: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'lowpkg.unpurge not implemented'} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret # Remove any targets not returned by _find_install_targets if pkgs: pkgs = [dict([(x, y)]) for x, y in six.iteritems(targets)] pkgs.extend([dict([(x, y)]) for x, y in six.iteritems(to_reinstall)]) elif sources: oldsources = sources sources = [x for x in oldsources if next(iter(list(x.keys()))) in targets] sources.extend([x for x in oldsources if next(iter(list(x.keys()))) in to_reinstall]) comment = [] if __opts__['test']: if targets: if sources: summary = ', '.join(targets) else: summary = ', '.join([_get_desired_pkg(x, targets) for x in targets]) comment.append('The following packages would be ' 'installed/updated: {0}'.format(summary)) if to_unpurge: comment.append( 'The following packages would have their selection 
status ' 'changed from \'purge\' to \'install\': {0}' .format(', '.join(to_unpurge)) ) if to_reinstall: # Add a comment for each package in to_reinstall with its # pkg.verify output if reinstall: reinstall_targets = [] for reinstall_pkg in to_reinstall: if sources: reinstall_targets.append(reinstall_pkg) else: reinstall_targets.append( _get_desired_pkg(reinstall_pkg, to_reinstall) ) msg = 'The following packages would be reinstalled: ' msg += ', '.join(reinstall_targets) comment.append(msg) else: for reinstall_pkg in to_reinstall: if sources: pkgstr = reinstall_pkg else: pkgstr = _get_desired_pkg(reinstall_pkg, to_reinstall) comment.append( 'Package \'{0}\' would be reinstalled because the ' 'following files have been altered:'.format(pkgstr) ) comment.append( _nested_output(altered_files[reinstall_pkg]) ) ret = {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret changes = {'installed': {}} modified_hold = None not_modified_hold = None failed_hold = None if targets or to_reinstall: try: pkg_ret = __salt__['pkg.install'](name=None, refresh=refresh, version=version, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=pkgs, sources=sources, reinstall=bool(to_reinstall), normalize=normalize, update_holds=update_holds, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while installing ' 'package(s): {0}'.format(exc)) if warnings: ret.setdefault('warnings', []).extend(warnings) return ret if refresh: refresh = False if isinstance(pkg_ret, dict): changes['installed'].update(pkg_ret) elif isinstance(pkg_ret, six.string_types): comment.append(pkg_ret) # Code below will be looking for a dictionary. If this is a string # it means that there was an exception raised and that no packages # changed, so now that we have added this error to the comments we # set this to an empty dictionary so that the code below which # checks reinstall targets works. pkg_ret = {} if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( name=name, pkgs=desired ) except (CommandExecutionError, SaltInvocationError) as exc: comment.append(six.text_type(exc)) ret = {'name': name, 'changes': changes, 'result': False, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: if 'result' in hold_ret and not hold_ret['result']: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] if to_unpurge: changes['purge_desired'] = __salt__['lowpkg.unpurge'](*to_unpurge) # Analyze pkg.install results for packages in targets if sources: modified = [x for x in changes['installed'] if x in targets] not_modified = [x for x in desired if x not in targets 
and x not in to_reinstall] failed = [x for x in targets if x not in modified] else: if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True new_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__: new_caps = __salt__['pkg.list_provides'](**kwargs) else: new_caps = {} ok, failed = _verify_install(desired, new_pkgs, ignore_epoch=ignore_epoch, new_caps=new_caps) modified = [x for x in ok if x in targets] not_modified = [x for x in ok if x not in targets and x not in to_reinstall] failed = [x for x in failed if x in targets] # If there was nothing unpurged, just set the changes dict to the contents # of changes['installed']. if not changes.get('purge_desired'): changes = changes['installed'] if modified: if sources: summary = ', '.join(modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in modified]) if len(summary) < 20: comment.append('The following packages were installed/updated: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} installed/updated.'.format( len(modified), 's' if len(modified) > 1 else '', 'were' if len(modified) > 1 else 'was' ) ) if modified_hold: for i in modified_hold: change_name = i['name'] if change_name in changes: comment.append(i['comment']) if changes[change_name]['new']: changes[change_name]['new'] += '\n' changes[change_name]['new'] += '{0}'.format(i['changes']['new']) if changes[change_name]['old']: changes[change_name]['old'] += '\n' changes[change_name]['old'] += '{0}'.format(i['changes']['old']) else: comment.append(i['comment']) changes[change_name] = {} changes[change_name]['new'] = '{0}'.format(i['changes']['new']) # Any requested packages that were not targeted for install or reinstall if not_modified: if sources: summary = ', '.join(not_modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in not_modified]) if len(not_modified) <= 20: comment.append('The following 
packages were already installed: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} already installed'.format( len(not_modified), 's' if len(not_modified) > 1 else '', 'were' if len(not_modified) > 1 else 'was' ) ) if not_modified_hold: for i in not_modified_hold: comment.append(i['comment']) result = True if failed: if sources: summary = ', '.join(failed) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in failed]) comment.insert(0, 'The following packages failed to ' 'install/update: {0}'.format(summary)) result = False if failed_hold: for i in failed_hold: comment.append(i['comment']) result = False # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] # Rerun pkg.verify for packages in to_reinstall to determine failed modified = [] failed = [] for reinstall_pkg in to_reinstall: if reinstall: if reinstall_pkg in pkg_ret: modified.append(reinstall_pkg) else: failed.append(reinstall_pkg) elif pkg_verify: # No need to wrap this in a try/except because we would already # have caught invalid arguments earlier. 
def downloaded(name,
               version=None,
               pkgs=None,
               fromrepo=None,
               ignore_epoch=None,
               **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that the package is downloaded, and that it is the correct version
    (if specified).

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    :param str name:
        The name of the package to be downloaded. This parameter is ignored if
        either "pkgs" is used. Additionally, please note that this option can
        only be used to download packages from a software repository.

    :param str version:
        Download a specific version of a package.

        .. important::
            As of version 2015.8.7, for distros which use yum/dnf, packages
            which have a version with a nonzero epoch (that is, versions which
            start with a number followed by a colon) must have the epoch
            included when specifying the version number. For example:

            .. code-block:: yaml

                vim-enhanced:
                  pkg.downloaded:
                    - version: 2:7.4.160-1.el7

            An **ignore_epoch** argument has been added which causes the
            epoch to be disregarded when the state checks to see if the
            desired version was installed.

            You can install a specific version when using the ``pkgs`` argument
            by including the version after the package:

            .. code-block:: yaml

                common_packages:
                  pkg.downloaded:
                    - pkgs:
                      - unzip
                      - dos2unix
                      - salt-minion: 2015.8.5-1.el6

    :param bool resolve_capabilities:
        Turn on resolving capabilities. This allow one to name "provides" or
        alias names for packages.

        .. versionadded:: 2018.3.0

    CLI Example:

    .. code-block:: yaml

        zsh:
          pkg.downloaded:
            - version: 5.0.5-4.63
            - fromrepo: "myrepository"
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # Only providers exposing pkg.list_downloaded (yum/dnf, zypper) can
    # verify the download afterwards, so bail out everywhere else.
    if 'pkg.list_downloaded' not in __salt__:
        ret['result'] = False
        ret['comment'] = 'The pkg.downloaded state is not available on ' \
                         'this platform'
        return ret

    # An explicitly-passed empty list is treated as "nothing to do", not an
    # error (None falls through to the name/version packing below).
    if not pkgs and isinstance(pkgs, list):
        ret['result'] = True
        ret['comment'] = 'No packages to download provided'
        return ret

    # If just a name (and optionally a version) is passed, just pack them into
    # the pkgs argument.
    if name and not pkgs:
        if version:
            pkgs = [{name: version}]
            version = None
        else:
            pkgs = [name]

    # It doesn't make sense here to receive 'downloadonly' as kwargs
    # as we're explicitly passing 'downloadonly=True' to execution module.
    if 'downloadonly' in kwargs:
        del kwargs['downloadonly']

    pkgs, _refresh = _resolve_capabilities(pkgs, **kwargs)

    # Only downloading not yet downloaded packages
    targets = _find_download_targets(name,
                                     version,
                                     pkgs,
                                     fromrepo=fromrepo,
                                     ignore_epoch=ignore_epoch,
                                     **kwargs)
    # A dict with a 'result' key is a ready-made state return from the
    # helper (error or nothing-to-do) — pass it straight through.
    if isinstance(targets, dict) and 'result' in targets:
        return targets
    elif not isinstance(targets, dict):
        ret['result'] = False
        ret['comment'] = 'An error was encountered while checking targets: ' \
                         '{0}'.format(targets)
        return ret

    if __opts__['test']:
        summary = ', '.join(targets)
        ret['comment'] = 'The following packages would be ' \
                         'downloaded: {0}'.format(summary)
        return ret

    try:
        pkg_ret = __salt__['pkg.install'](name=name,
                                          pkgs=pkgs,
                                          version=version,
                                          downloadonly=True,
                                          fromrepo=fromrepo,
                                          ignore_epoch=ignore_epoch,
                                          **kwargs)
        ret['result'] = True
        ret['changes'].update(pkg_ret)
    except CommandExecutionError as exc:
        ret = {'name': name, 'result': False}
        if exc.info:
            # Get information for state return from the exception.
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = 'An error was encountered while downloading ' \
                             'package(s): {0}'.format(exc)
        return ret

    # Re-list the download cache and confirm every target actually landed.
    new_pkgs = __salt__['pkg.list_downloaded'](**kwargs)
    ok, failed = _verify_install(targets, new_pkgs, ignore_epoch=ignore_epoch)

    if failed:
        summary = ', '.join([_get_desired_pkg(x, targets)
                             for x in failed])
        ret['result'] = False
        ret['comment'] = 'The following packages failed to ' \
                         'download: {0}'.format(summary)

    # Nothing changed and nothing failed: everything was already cached.
    if not ret['changes'] and not ret['comment']:
        ret['result'] = True
        ret['comment'] = 'Packages are already downloaded: ' \
                         '{0}'.format(', '.join(targets))
    return ret
def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that packages related to certain advisory ids are installed.

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    CLI Example:

    .. code-block:: yaml

        issue-foo-fixed:
          pkg.patch_installed:
            - advisory_ids:
              - SUSE-SLE-SERVER-12-SP2-2017-185
              - SUSE-SLE-SERVER-12-SP2-2017-150
              - SUSE-SLE-SERVER-12-SP2-2017-120
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # Advisory/patch support exists only where pkg.list_patches is loaded
    # (yum/dnf, zypper).
    if 'pkg.list_patches' not in __salt__:
        ret['result'] = False
        ret['comment'] = 'The pkg.patch_installed state is not available on ' \
                         'this platform'
        return ret

    # An explicitly-passed empty list means "nothing to do" and succeeds.
    if not advisory_ids and isinstance(advisory_ids, list):
        ret['result'] = True
        ret['comment'] = 'No advisory ids provided'
        return ret

    # Only targeting advisories whose packages are not yet present.
    targets = _find_advisory_targets(name, advisory_ids, **kwargs)
    # A dict with a 'result' key is a ready-made state return from the
    # helper; anything that is not a list of targets is an error.
    if isinstance(targets, dict) and 'result' in targets:
        return targets
    elif not isinstance(targets, list):
        ret['result'] = False
        ret['comment'] = 'An error was encountered while checking targets: ' \
                         '{0}'.format(targets)
        return ret

    if __opts__['test']:
        summary = ', '.join(targets)
        ret['comment'] = 'The following advisory patches would be ' \
                         'downloaded: {0}'.format(summary)
        return ret

    try:
        pkg_ret = __salt__['pkg.install'](name=name,
                                          advisory_ids=advisory_ids,
                                          downloadonly=downloadonly,
                                          **kwargs)
        ret['result'] = True
        ret['changes'].update(pkg_ret)
    except CommandExecutionError as exc:
        ret = {'name': name, 'result': False}
        if exc.info:
            # Get information for state return from the exception.
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = ('An error was encountered while downloading '
                              'package(s): {0}'.format(exc))
        return ret

    # No changes and no comment set above means the advisories were already
    # satisfied; report success with the appropriate wording.
    if not ret['changes'] and not ret['comment']:
        status = 'downloaded' if downloadonly else 'installed'
        ret['result'] = True
        ret['comment'] = ('Advisory patch is not needed or related packages '
                          'are already {0}'.format(status))
    return ret
def patch_downloaded(name, advisory_ids=None, **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that packages related to certain advisory ids are downloaded.

    This is a thin wrapper around :py:func:`pkg.patch_installed
    <salt.states.pkg.patch_installed>` with ``downloadonly=True`` forced on.

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    CLI Example:

    .. code-block:: yaml

        preparing-to-fix-issues:
          pkg.patch_downloaded:
            - advisory_ids:
              - SUSE-SLE-SERVER-12-SP2-2017-185
              - SUSE-SLE-SERVER-12-SP2-2017-150
              - SUSE-SLE-SERVER-12-SP2-2017-120
    '''
    # Advisory/patch support exists only where pkg.list_patches is loaded.
    if 'pkg.list_patches' not in __salt__:
        return {'name': name,
                'result': False,
                'changes': {},
                'comment': 'The pkg.patch_downloaded state is not available on '
                           'this platform'}

    # 'downloadonly' is forced to True in the delegated call below, so drop
    # any caller-supplied value to avoid passing the keyword twice.
    kwargs.pop('downloadonly', None)
    return patch_installed(name=name,
                           advisory_ids=advisory_ids,
                           downloadonly=True,
                           **kwargs)
def latest(
        name,
        refresh=None,
        fromrepo=None,
        skip_verify=False,
        pkgs=None,
        watch_flags=True,
        **kwargs):
    '''
    Ensure that the named package is installed and the latest available
    package. If the package can be updated, this state function will update
    the package. Generally it is better for the
    :mod:`installed <salt.states.pkg.installed>` function to be used, as
    :mod:`latest <salt.states.pkg.latest>` will update the package whenever a
    new package is available.

    name
        The name of the package to maintain at the latest available version.
        This parameter is ignored if "pkgs" is used.

    fromrepo
        Specify a repository from which to install

    skip_verify
        Skip the GPG verification check for the package to be installed

    refresh
        This parameter controls whether or not the package repo database is
        updated prior to checking for the latest available version of the
        requested packages.

        If ``True``, the package database will be refreshed (``apt-get
        update`` or equivalent, depending on platform) before checking for
        the latest available version of the requested packages.

        If ``False``, the package database will *not* be refreshed before
        checking.

        If unset, then Salt treats package database refreshes differently
        depending on whether or not a ``pkg`` state has been executed already
        during the current Salt run. Once a refresh has been performed in a
        ``pkg`` state, for the remainder of that Salt run no other refreshes
        will be performed for ``pkg`` states which do not explicitly set
        ``refresh`` to ``True``. This prevents needless additional refreshes
        from slowing down the Salt run.

    :param str cache_valid_time:

        .. versionadded:: 2016.11.0

        This parameter sets the value in seconds after which the cache is
        marked as invalid, and a cache update is necessary. This overwrites
        the ``refresh`` parameter's default behavior.

        Example:

        .. code-block:: yaml

            httpd:
              pkg.latest:
                - refresh: True
                - cache_valid_time: 300

        In this case, a refresh will not take place for 5 minutes since the
        last ``apt-get update`` was executed on the system.

        .. note::

            This parameter is available only on Debian based distributions
            and has no effect on the rest.

    :param bool resolve_capabilities:
        Turn on resolving capabilities. This allow one to name "provides" or
        alias names for packages.

        .. versionadded:: 2018.3.0

    Multiple Package Installation Options:

    (Not yet supported for: FreeBSD, OpenBSD, MacOS, and Solaris pkgutil)

    pkgs
        A list of packages to maintain at the latest available version.

    .. code-block:: yaml

        mypkgs:
          pkg.latest:
            - pkgs:
              - foo
              - bar
              - baz

    install_recommends
        Whether to install the packages marked as recommended. Default is
        ``True``. Currently only works with APT-based systems.

        .. versionadded:: 2015.5.0

    .. code-block:: yaml

        httpd:
          pkg.latest:
            - install_recommends: False

    only_upgrade
        Only upgrade the packages, if they are already installed. Default is
        ``False``. Currently only works with APT-based systems.

        .. versionadded:: 2015.5.0

    .. code-block:: yaml

        httpd:
          pkg.latest:
            - only_upgrade: True

    .. note::
        If this parameter is set to True and the package is not already
        installed, the state will fail.

    report_reboot_exit_codes
        If the installer exits with a recognized exit code indicating that
        a reboot is required, the module function

           *win_system.set_reboot_required_witnessed*

        will be called, preserving the knowledge of this event for the
        remainder of the current boot session. For the time being, ``3010``
        is the only recognized exit code, but this is subject to future
        refinement. The value of this param defaults to ``True``. This
        parameter has no effect on non-Windows systems.

        .. versionadded:: 2016.11.0

        .. code-block:: yaml

            ms vcpp installed:
              pkg.latest:
                - name: ms-vcpp
                - report_reboot_exit_codes: False
    '''
    refresh = salt.utils.pkg.check_refresh(__opts__, refresh)

    if kwargs.get('sources'):
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'The "sources" parameter is not supported.'}
    elif pkgs:
        desired_pkgs = list(_repack_pkgs(pkgs).keys())
        if not desired_pkgs:
            # Badly-formatted SLS
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'Invalidly formatted "pkgs" parameter. See '
                               'minion log.'}
    else:
        if not pkgs and isinstance(pkgs, list):
            # An explicitly-passed empty pkgs list is a successful no-op.
            return {
                'name': name,
                'changes': {},
                'result': True,
                'comment': 'No packages to install provided'
            }
        else:
            desired_pkgs = [name]

    kwargs['saltenv'] = __env__

    # check if capabilities should be checked and modify the requested packages
    # accordingly.
    desired_pkgs, refresh = _resolve_capabilities(desired_pkgs, refresh=refresh,
                                                  **kwargs)

    try:
        avail = __salt__['pkg.latest_version'](*desired_pkgs,
                                               fromrepo=fromrepo,
                                               refresh=refresh,
                                               **kwargs)
    except CommandExecutionError as exc:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'An error was encountered while checking the '
                           'newest available version of package(s): {0}'
                           .format(exc)}

    try:
        cur = __salt__['pkg.version'](*desired_pkgs, **kwargs)
    except CommandExecutionError as exc:
        return {'name': name, 'changes': {}, 'result': False,
                'comment': exc.strerror}

    # Repack the cur/avail data if only a single package is being checked
    if isinstance(cur, six.string_types):
        cur = {desired_pkgs[0]: cur}
    if isinstance(avail, six.string_types):
        avail = {desired_pkgs[0]: avail}

    targets = {}
    problems = []
    for pkg in desired_pkgs:
        if not avail.get(pkg):
            # Package either a) is up-to-date, or b) does not exist
            if not cur.get(pkg):
                # Package does not exist
                msg = 'No information found for \'{0}\'.'.format(pkg)
                log.error(msg)
                problems.append(msg)
            elif watch_flags \
                    and __grains__.get('os_family') == 'Gentoo' \
                    and __salt__['portage_config.is_changed_uses'](pkg):
                # Package is up-to-date, but Gentoo USE flags are changing so
                # we need to add it to the targets
                targets[pkg] = cur[pkg]
        else:
            # Package either a) is not installed, or b) is installed and has an
            # upgrade available
            targets[pkg] = avail[pkg]

    if problems:
        return {
            'name': name,
            'changes': {},
            'result': False,
            'comment': ' '.join(problems)
        }

    if targets:
        # Find up-to-date packages
        if not pkgs:
            # There couldn't have been any up-to-date packages if this state
            # only targeted a single package and is being allowed to proceed to
            # the install step.
            up_to_date = []
        else:
            up_to_date = [x for x in pkgs if x not in targets]

        if __opts__['test']:
            comments = []
            comments.append(
                'The following packages would be installed/upgraded: ' +
                ', '.join(sorted(targets))
            )
            if up_to_date:
                up_to_date_count = len(up_to_date)
                if up_to_date_count <= 10:
                    comments.append(
                        'The following packages are already up-to-date: ' +
                        ', '.join(
                            ['{0} ({1})'.format(x, cur[x])
                             for x in sorted(up_to_date)]
                        )
                    )
                else:
                    comments.append(
                        '{0} packages are already up-to-date'
                        .format(up_to_date_count)
                    )

            return {'name': name,
                    'changes': {},
                    'result': None,
                    'comment': '\n'.join(comments)}

        if salt.utils.platform.is_windows():
            # pkg.install execution module on windows ensures the software
            # package is installed when no version is specified, it does not
            # upgrade the software to the latest. This is per the design.
            # Build updated list of pkgs *with version number*, exclude
            # non-targeted ones
            targeted_pkgs = [{x: targets[x]} for x in targets]
        else:
            # Build updated list of pkgs to exclude non-targeted ones
            targeted_pkgs = list(targets)

        # No need to refresh, if a refresh was necessary it would have been
        # performed above when pkg.latest_version was run.
        try:
            changes = __salt__['pkg.install'](name=None,
                                              refresh=False,
                                              fromrepo=fromrepo,
                                              skip_verify=skip_verify,
                                              pkgs=targeted_pkgs,
                                              **kwargs)
        except CommandExecutionError as exc:
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'An error was encountered while installing '
                               'package(s): {0}'.format(exc)}

        if changes:
            # Find failed and successful updates
            failed = [x for x in targets
                      if not changes.get(x) or
                      changes[x].get('new') != targets[x] and
                      targets[x] != 'latest']
            successful = [x for x in targets if x not in failed]

            comments = []
            if failed:
                msg = 'The following packages failed to update: ' \
                      '{0}'.format(', '.join(sorted(failed)))
                comments.append(msg)
            if successful:
                msg = 'The following packages were successfully ' \
                      'installed/upgraded: ' \
                      '{0}'.format(', '.join(sorted(successful)))
                comments.append(msg)
            if up_to_date:
                if len(up_to_date) <= 10:
                    msg = 'The following packages were already up-to-date: ' \
                          '{0}'.format(', '.join(sorted(up_to_date)))
                else:
                    msg = '{0} packages were already up-to-date '.format(
                        len(up_to_date))
                comments.append(msg)

            return {'name': name,
                    'changes': changes,
                    'result': False if failed else True,
                    'comment': ' '.join(comments)}
        else:
            # pkg.install returned no changes at all: every target failed.
            if len(targets) > 10:
                comment = ('{0} targeted packages failed to update. '
                           'See debug log for details.'.format(len(targets)))
            elif len(targets) > 1:
                comment = ('The following targeted packages failed to update. '
                           'See debug log for details: ({0}).'
                           .format(', '.join(sorted(targets))))
            else:
                comment = 'Package {0} failed to ' \
                          'update.'.format(next(iter(list(targets.keys()))))
            if up_to_date:
                if len(up_to_date) <= 10:
                    comment += ' The following packages were already ' \
                               'up-to-date: ' \
                               '{0}'.format(', '.join(sorted(up_to_date)))
                else:
                    comment += '{0} packages were already ' \
                               'up-to-date'.format(len(up_to_date))

            return {'name': name,
                    'changes': changes,
                    'result': False,
                    'comment': comment}
    else:
        # No targets at all: everything requested is already up-to-date.
        if len(desired_pkgs) > 10:
            comment = 'All {0} packages are up-to-date.'.format(
                len(desired_pkgs))
        elif len(desired_pkgs) > 1:
            comment = 'All packages are up-to-date ' \
                      '({0}).'.format(', '.join(sorted(desired_pkgs)))
        else:
            comment = 'Package {0} is already ' \
                      'up-to-date'.format(desired_pkgs[0])

        return {'name': name,
                'changes': {},
                'result': True,
                'comment': comment}
def _uninstall(
        action='remove',
        name=None,
        version=None,
        pkgs=None,
        normalize=True,
        ignore_epoch=False,
        **kwargs):
    '''
    Common function for package removal, shared by the ``removed`` and
    ``purged`` states.

    action
        Either ``remove`` or ``purge``; selects the execution-module function
        (``pkg.remove`` / ``pkg.purge``) that is invoked.

    name / version / pkgs / normalize / ignore_epoch
        Same meaning as in the public ``removed``/``purged`` states.

    Returns a standard state dict (name/changes/result/comment).
    '''
    if action not in ('remove', 'purge'):
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'Invalid action \'{0}\'. '
                           'This is probably a bug.'.format(action)}

    try:
        pkg_params = __salt__['pkg_resource.parse_targets'](
            name, pkgs, normalize=normalize)[0]
    except MinionError as exc:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'An error was encountered while parsing targets: '
                           '{0}'.format(exc)}
    targets = _find_remove_targets(name, version, pkgs, normalize,
                                   ignore_epoch=ignore_epoch, **kwargs)
    # A dict with a 'result' key is a ready-made state return from the
    # helper; anything that is not a list of targets is an error.
    if isinstance(targets, dict) and 'result' in targets:
        return targets
    elif not isinstance(targets, list):
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'An error was encountered while checking targets: '
                           '{0}'.format(targets)}
    if action == 'purge':
        # For a purge, packages that were removed but still have config files
        # left behind (removed=True) are also targets.
        old_removed = __salt__['pkg.list_pkgs'](versions_as_list=True,
                                                removed=True,
                                                **kwargs)
        targets.extend([x for x in pkg_params if x in old_removed])
    targets.sort()

    if not targets:
        return {'name': name,
                'changes': {},
                'result': True,
                'comment': 'None of the targeted packages are installed'
                           '{0}'.format(' or partially installed'
                                        if action == 'purge' else '')}

    if __opts__['test']:
        return {'name': name,
                'changes': {},
                'result': None,
                'comment': 'The following packages will be {0}d: '
                           '{1}.'.format(action, ', '.join(targets))}

    # Dispatch to pkg.remove or pkg.purge based on the action.
    changes = __salt__['pkg.{0}'.format(action)](name, pkgs=pkgs,
                                                 version=version, **kwargs)
    new = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
    failed = []
    for x in pkg_params:
        if __grains__['os_family'] in ['Suse', 'RedHat']:
            # Check if the package version set to be removed is actually removed:
            if x in new and not pkg_params[x]:
                failed.append(x)
            elif x in new and pkg_params[x] in new[x]:
                failed.append(x + "-" + pkg_params[x])
        elif x in new:
            failed.append(x)

    if action == 'purge':
        # Anything still listed with removed=True kept its config files, so
        # the purge did not fully succeed for it.
        new_removed = __salt__['pkg.list_pkgs'](versions_as_list=True,
                                                removed=True,
                                                **kwargs)
        failed.extend([x for x in pkg_params if x in new_removed])
    failed.sort()

    if failed:
        return {'name': name,
                'changes': changes,
                'result': False,
                'comment': 'The following packages failed to {0}: '
                           '{1}.'.format(action, ', '.join(failed))}

    comments = []
    not_installed = sorted([x for x in pkg_params if x not in targets])
    if not_installed:
        comments.append('The following packages were not installed: '
                        '{0}'.format(', '.join(not_installed)))
        comments.append('The following packages were {0}d: '
                        '{1}.'.format(action, ', '.join(targets)))
    else:
        comments.append('All targeted packages were {0}d.'.format(action))

    return {'name': name,
            'changes': changes,
            'result': True,
            'comment': ' '.join(comments)}
def removed(name,
            version=None,
            pkgs=None,
            normalize=True,
            ignore_epoch=False,
            **kwargs):
    '''
    Verify that a package is not installed, calling ``pkg.remove`` if
    necessary to remove the package.

    name
        The name of the package to be removed.

    version
        The version of the package that should be removed. Don't do anything
        if the package is installed with an unmatching version. On yum/dnf
        distros, versions with a nonzero epoch (e.g. ``2:7.4.160-1.el7``)
        must include the epoch unless ``ignore_epoch`` is set.

    normalize : True
        Normalize the package name by removing the architecture, if the
        architecture of the package is different from the architecture of the
        operating system. Disable for packages which embed the architecture
        in their actual name (e.g. kernel modules).

        .. versionadded:: 2015.8.0

    ignore_epoch : False
        Set to ``True`` to disregard the epoch when comparing a requested
        ``version`` against the installed version.

        .. versionadded:: 2015.8.9

    Multiple Package Options:

    pkgs
        A list of packages to remove. Must be passed as a python list. The
        ``name`` parameter will be ignored if this option is passed. It
        accepts version numbers as well.

        .. versionadded:: 0.16.0
    '''
    kwargs['saltenv'] = __env__
    try:
        return _uninstall(action='remove', name=name, version=version,
                          pkgs=pkgs, normalize=normalize,
                          ignore_epoch=ignore_epoch, **kwargs)
    except CommandExecutionError as exc:
        # The execution module may attach partial-change info to the
        # exception; surface it in the state return when present.
        if exc.info:
            changes = exc.info.get('changes', {})
            comment = exc.strerror_without_changes
        else:
            changes = {}
            comment = ('An error was encountered while removing '
                       'package(s): {0}'.format(exc))
        return {'name': name,
                'result': False,
                'changes': changes,
                'comment': comment}
def purged(name,
           version=None,
           pkgs=None,
           normalize=True,
           ignore_epoch=False,
           **kwargs):
    '''
    Verify that a package is not installed, calling ``pkg.purge`` if
    necessary to purge the package. All configuration files are also removed.

    name
        The name of the package to be purged.

    version
        The version of the package that should be removed. Don't do anything
        if the package is installed with an unmatching version. On yum/dnf
        distros, versions with a nonzero epoch (e.g. ``2:7.4.160-1.el7``)
        must include the epoch unless ``ignore_epoch`` is set.

    normalize : True
        Normalize the package name by removing the architecture, if the
        architecture of the package is different from the architecture of the
        operating system. Disable for packages which embed the architecture
        in their actual name (e.g. kernel modules).

        .. versionadded:: 2015.8.0

    ignore_epoch : False
        Set to ``True`` to disregard the epoch when comparing a requested
        ``version`` against the installed version.

        .. versionadded:: 2015.8.9

    Multiple Package Options:

    pkgs
        A list of packages to purge. Must be passed as a python list. The
        ``name`` parameter will be ignored if this option is passed. It
        accepts version numbers as well.

        .. versionadded:: 0.16.0
    '''
    kwargs['saltenv'] = __env__
    try:
        return _uninstall(action='purge', name=name, version=version,
                          pkgs=pkgs, normalize=normalize,
                          ignore_epoch=ignore_epoch, **kwargs)
    except CommandExecutionError as exc:
        # The execution module may attach partial-change info to the
        # exception; surface it in the state return when present.
        if exc.info:
            changes = exc.info.get('changes', {})
            comment = exc.strerror_without_changes
        else:
            changes = {}
            comment = ('An error was encountered while purging '
                       'package(s): {0}'.format(exc))
        return {'name': name,
                'result': False,
                'changes': changes,
                'comment': comment}
def uptodate(name, refresh=False, pkgs=None, **kwargs):
    '''
    .. versionadded:: 2014.7.0

    .. versionchanged:: 2018.3.0

        Added support for the ``pkgin`` provider.

    Verify that the system is completely up to date.

    name
        The name has no functional value and is only used as a tracking
        reference

    refresh
        refresh the package database before checking for new upgrades

    pkgs
        list of packages to upgrade

    :param str cache_valid_time:
        This parameter sets the value in seconds after which cache marked as
        invalid, and cache update is necessary. This overwrite ``refresh``
        parameter default behavior.

        In this case cache_valid_time is set, refresh will not take place for
        amount in seconds since last ``apt-get update`` executed on the
        system.

        .. note::

            This parameter available only on Debian based distributions, and
            have no effect on the rest.

    :param bool resolve_capabilities:
        Turn on resolving capabilities. This allow one to name "provides" or
        alias names for packages.

        .. versionadded:: 2018.3.0

    kwargs
        Any keyword arguments to pass through to ``pkg.upgrade``.

        .. versionadded:: 2015.5.0
    '''
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': 'Failed to update'}

    if 'pkg.list_upgrades' not in __salt__:
        ret['comment'] = 'State pkg.uptodate is not available'
        return ret

    # emerge --update doesn't appear to support repo notation
    if 'fromrepo' in kwargs and __grains__['os_family'] == 'Gentoo':
        ret['comment'] = '\'fromrepo\' argument not supported on this platform'
        return ret

    # refresh must be a real bool here; any other type is rejected below.
    if isinstance(refresh, bool):
        pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs)
        try:
            packages = __salt__['pkg.list_upgrades'](refresh=refresh, **kwargs)
            # Record old/new versions per package for the test-mode changes
            # dict and for the post-upgrade "missing" check below.
            expected = {pkgname: {'new': pkgver,
                                  'old': __salt__['pkg.version'](pkgname,
                                                                 **kwargs)}
                        for pkgname, pkgver in six.iteritems(packages)}
            if isinstance(pkgs, list):
                # Restrict the upgrade set to the explicitly requested pkgs.
                packages = [pkg for pkg in packages if pkg in pkgs]
                expected = {pkgname: pkgver
                            for pkgname, pkgver in six.iteritems(expected)
                            if pkgname in pkgs}
        except Exception as exc:
            ret['comment'] = six.text_type(exc)
            return ret
    else:
        ret['comment'] = 'refresh must be either True or False'
        return ret

    if not packages:
        ret['comment'] = 'System is already up-to-date'
        ret['result'] = True
        return ret
    elif __opts__['test']:
        ret['comment'] = 'System update will be performed'
        ret['changes'] = expected
        ret['result'] = None
        return ret

    try:
        ret['changes'] = __salt__['pkg.upgrade'](refresh=refresh, pkgs=pkgs,
                                                 **kwargs)
    except CommandExecutionError as exc:
        if exc.info:
            # Get information for state return from the exception.
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = ('An error was encountered while updating '
                              'packages: {0}'.format(exc))
        return ret

    # If a package list was provided, ensure those packages were updated
    missing = []
    if isinstance(pkgs, list):
        missing = [pkg for pkg in six.iterkeys(expected)
                   if pkg not in ret['changes']]

    if missing:
        ret['comment'] = 'The following package(s) failed to ' \
                         'update: {0}'.format(', '.join(missing))
        ret['result'] = False
    else:
        ret['comment'] = 'Upgrade ran successfully'
        ret['result'] = True

    return ret
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while updating ' 'packages: {0}'.format(exc)) return ret # If a package list was provided, ensure those packages were updated missing = [] if isinstance(pkgs, list): missing = [pkg for pkg in six.iterkeys(expected) if pkg not in ret['changes']] if missing: ret['comment'] = 'The following package(s) failed to update: {0}'.format(', '.join(missing)) ret['result'] = False else: ret['comment'] = 'Upgrade ran successfully' ret['result'] = True return ret def group_installed(name, skip=None, include=None, **kwargs): ''' .. versionadded:: 2015.8.0 .. versionchanged:: 2016.11.0 Added support in :mod:`pacman <salt.modules.pacman>` Ensure that an entire package group is installed. This state is currently only supported for the :mod:`yum <salt.modules.yumpkg>` and :mod:`pacman <salt.modules.pacman>` package managers. skip Packages that would normally be installed by the package group ("default" packages), which should not be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - skip: - piranha include Packages which are included in a group, which would not normally be installed by a ``yum groupinstall`` ("optional" packages). Note that this will not enforce group membership; if you include packages which are not members of the specified groups, they will still be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - include: - haproxy .. versionchanged:: 2016.3.0 This option can no longer be passed as a comma-separated list, it must now be passed as a list (as shown in the above example). .. note:: Because this is essentially a wrapper around :py:func:`pkg.install <salt.modules.yumpkg.install>`, any argument which can be passed to pkg.install may also be included here, and it will be passed on to the call to :py:func:`pkg.install <salt.modules.yumpkg.install>`. 
''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} if 'pkg.group_diff' not in __salt__: ret['comment'] = 'pkg.group_install not available for this platform' return ret if skip is None: skip = [] else: if not isinstance(skip, list): ret['comment'] = 'skip must be formatted as a list' return ret for idx, item in enumerate(skip): if not isinstance(item, six.string_types): skip[idx] = six.text_type(item) if include is None: include = [] else: if not isinstance(include, list): ret['comment'] = 'include must be formatted as a list' return ret for idx, item in enumerate(include): if not isinstance(item, six.string_types): include[idx] = six.text_type(item) try: diff = __salt__['pkg.group_diff'](name) except CommandExecutionError as err: ret['comment'] = ('An error was encountered while installing/updating ' 'group \'{0}\': {1}.'.format(name, err)) return ret mandatory = diff['mandatory']['installed'] + \ diff['mandatory']['not installed'] invalid_skip = [x for x in mandatory if x in skip] if invalid_skip: ret['comment'] = ( 'The following mandatory packages cannot be skipped: {0}' .format(', '.join(invalid_skip)) ) return ret targets = diff['mandatory']['not installed'] targets.extend([x for x in diff['default']['not installed'] if x not in skip]) targets.extend(include) if not targets: ret['result'] = True ret['comment'] = 'Group \'{0}\' is already installed'.format(name) return ret partially_installed = diff['mandatory']['installed'] \ or diff['default']['installed'] \ or diff['optional']['installed'] if __opts__['test']: ret['result'] = None if partially_installed: ret['comment'] = ( 'Group \'{0}\' is partially installed and will be updated' .format(name) ) else: ret['comment'] = 'Group \'{0}\' will be installed'.format(name) return ret try: ret['changes'] = __salt__['pkg.install'](pkgs=targets, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while ' 'installing/updating group \'{0}\': {1}' .format(name, exc)) return ret failed = [x for x in targets if x not in __salt__['pkg.list_pkgs'](**kwargs)] if failed: ret['comment'] = ( 'Failed to install the following packages: {0}' .format(', '.join(failed)) ) return ret ret['result'] = True ret['comment'] = 'Group \'{0}\' was {1}'.format( name, 'updated' if partially_installed else 'installed' ) return ret def mod_init(low): ''' Set a flag to tell the install functions to refresh the package database. This ensures that the package database is refreshed only once during a state run significantly improving the speed of package management during a state run. It sets a flag for a number of reasons, primarily due to timeline logic. When originally setting up the mod_init for pkg a number of corner cases arose with different package managers and how they refresh package data. It also runs the "ex_mod_init" from the package manager module that is currently loaded. The "ex_mod_init" is expected to work as a normal "mod_init" function. .. 
seealso:: :py:func:`salt.modules.ebuild.ex_mod_init` ''' ret = True if 'pkg.ex_mod_init' in __salt__: ret = __salt__['pkg.ex_mod_init'](low) if low['fun'] == 'installed' or low['fun'] == 'latest': salt.utils.pkg.write_rtag(__opts__) return ret return False def mod_aggregate(low, chunks, running): ''' The mod_aggregate function which looks up all packages in the available low chunks and merges them into a single pkgs ref in the present low data ''' pkgs = [] pkg_type = None agg_enabled = [ 'installed', 'latest', 'removed', 'purged', ] if low.get('fun') not in agg_enabled: return low for chunk in chunks: tag = __utils__['state.gen_tag'](chunk) if tag in running: # Already ran the pkg state, skip aggregation continue if chunk.get('state') == 'pkg': if '__agg__' in chunk: continue # Check for the same function if chunk.get('fun') != low.get('fun'): continue # Check for the same repo if chunk.get('fromrepo') != low.get('fromrepo'): continue # Check first if 'sources' was passed so we don't aggregate pkgs # and sources together. if 'sources' in chunk: if pkg_type is None: pkg_type = 'sources' if pkg_type == 'sources': pkgs.extend(chunk['sources']) chunk['__agg__'] = True else: if pkg_type is None: pkg_type = 'pkgs' if pkg_type == 'pkgs': # Pull out the pkg names! if 'pkgs' in chunk: pkgs.extend(chunk['pkgs']) chunk['__agg__'] = True elif 'name' in chunk: version = chunk.pop('version', None) if version is not None: pkgs.append({chunk['name']: version}) else: pkgs.append(chunk['name']) chunk['__agg__'] = True if pkg_type is not None and pkgs: if pkg_type in low: low[pkg_type].extend(pkgs) else: low[pkg_type] = pkgs return low def mod_watch(name, **kwargs): ''' Install/reinstall a package based on a watch requisite .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered. 
''' sfun = kwargs.pop('sfun', None) mapfun = {'purged': purged, 'latest': latest, 'removed': removed, 'installed': installed} if sfun in mapfun: return mapfun[sfun](name, **kwargs) return {'name': name, 'changes': {}, 'comment': 'pkg.{0} does not work with the watch requisite'.format(sfun), 'result': False}
saltstack/salt
salt/states/pkg.py
_fulfills_version_spec
python
def _fulfills_version_spec(versions, oper, desired_version, ignore_epoch=False): ''' Returns True if any of the installed versions match the specified version, otherwise returns False ''' cmp_func = __salt__.get('pkg.version_cmp') # stripping "with_origin" dict wrapper if salt.utils.platform.is_freebsd(): if isinstance(versions, dict) and 'version' in versions: versions = versions['version'] for ver in versions: if (oper == '==' and fnmatch.fnmatch(ver, desired_version)) \ or salt.utils.versions.compare(ver1=ver, oper=oper, ver2=desired_version, cmp_func=cmp_func, ignore_epoch=ignore_epoch): return True return False
Returns True if any of the installed versions match the specified version, otherwise returns False
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pkg.py#L218-L237
[ "def compare(ver1='', oper='==', ver2='', cmp_func=None, ignore_epoch=False):\n '''\n Compares two version numbers. Accepts a custom function to perform the\n cmp-style version comparison, otherwise uses version_cmp().\n '''\n cmp_map = {'<': (-1,), '<=': (-1, 0), '==': (0,),\n '>=': (0, 1), '>': (1,)}\n if oper not in ('!=',) and oper not in cmp_map:\n log.error('Invalid operator \\'%s\\' for version comparison', oper)\n return False\n\n if cmp_func is None:\n cmp_func = version_cmp\n\n cmp_result = cmp_func(ver1, ver2, ignore_epoch=ignore_epoch)\n if cmp_result is None:\n return False\n\n # Check if integer/long\n if not isinstance(cmp_result, numbers.Integral):\n log.error('The version comparison function did not return an '\n 'integer/long.')\n return False\n\n if oper == '!=':\n return cmp_result not in cmp_map['==']\n else:\n # Gracefully handle cmp_result not in (-1, 0, 1).\n if cmp_result < -1:\n cmp_result = -1\n elif cmp_result > 1:\n cmp_result = 1\n\n return cmp_result in cmp_map[oper]\n" ]
# -*- coding: utf-8 -*- ''' Installation of packages using OS package managers such as yum or apt-get ========================================================================= .. note:: On minions running systemd>=205, as of version 2015.8.12, 2016.3.3, and 2016.11.0, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing the package manager commands spawned by Salt, when Salt updates itself (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.use_scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Salt can manage software packages via the pkg state module, packages can be set up to be installed, latest, removed and purged. Package management declarations are typically rather simple: .. code-block:: yaml vim: pkg.installed A more involved example involves pulling from a custom repository. .. code-block:: yaml base: pkgrepo.managed: - humanname: Logstash PPA - name: ppa:wolfnet/logstash - dist: precise - file: /etc/apt/sources.list.d/logstash.list - keyid: 28B04E4A - keyserver: keyserver.ubuntu.com logstash: pkg.installed: - fromrepo: ppa:wolfnet/logstash Multiple packages can also be installed with the use of the pkgs state module .. code-block:: yaml dotdeb.repo: pkgrepo.managed: - humanname: Dotdeb - name: deb http://packages.dotdeb.org wheezy-php55 all - dist: wheezy-php55 - file: /etc/apt/sources.list.d/dotbeb.list - keyid: 89DF5277 - keyserver: keys.gnupg.net - refresh_db: true php.packages: pkg.installed: - fromrepo: wheezy-php55 - pkgs: - php5-fpm - php5-cli - php5-curl .. 
warning:: Package names are currently case-sensitive. If the minion is using a package manager which is not case-sensitive (such as :mod:`pkgng <salt.modules.pkgng>`), then this state will fail if the proper case is not used. This will be addressed in a future release of Salt. ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import fnmatch import logging import os import re # Import Salt libs import salt.utils.pkg import salt.utils.platform import salt.utils.versions from salt.output import nested from salt.utils.functools import namespaced_function as _namespaced_function from salt.utils.odict import OrderedDict as _OrderedDict from salt.exceptions import ( CommandExecutionError, MinionError, SaltInvocationError ) from salt.modules.pkg_resource import _repack_pkgs # Import 3rd-party libs from salt.ext import six # pylint: disable=invalid-name _repack_pkgs = _namespaced_function(_repack_pkgs, globals()) if salt.utils.platform.is_windows(): # pylint: disable=import-error,no-name-in-module,unused-import from salt.ext.six.moves.urllib.parse import urlparse as _urlparse from salt.exceptions import SaltRenderError import collections import datetime import errno import time from functools import cmp_to_key # pylint: disable=import-error # pylint: enable=unused-import from salt.modules.win_pkg import _get_package_info from salt.modules.win_pkg import get_repo_data from salt.modules.win_pkg import _get_repo_details from salt.modules.win_pkg import _refresh_db_conditional from salt.modules.win_pkg import refresh_db from salt.modules.win_pkg import genrepo from salt.modules.win_pkg import _repo_process_pkg_sls from salt.modules.win_pkg import _get_latest_pkg_version from salt.modules.win_pkg import _reverse_cmp_pkg_versions _get_package_info = _namespaced_function(_get_package_info, globals()) get_repo_data = _namespaced_function(get_repo_data, globals()) _get_repo_details = \ _namespaced_function(_get_repo_details, globals()) 
_refresh_db_conditional = \ _namespaced_function(_refresh_db_conditional, globals()) refresh_db = _namespaced_function(refresh_db, globals()) genrepo = _namespaced_function(genrepo, globals()) _repo_process_pkg_sls = \ _namespaced_function(_repo_process_pkg_sls, globals()) _get_latest_pkg_version = \ _namespaced_function(_get_latest_pkg_version, globals()) _reverse_cmp_pkg_versions = \ _namespaced_function(_reverse_cmp_pkg_versions, globals()) # The following imports are used by the namespaced win_pkg funcs # and need to be included in their globals. # pylint: disable=import-error,unused-import from salt.utils.versions import LooseVersion # pylint: enable=import-error,unused-import # pylint: enable=invalid-name log = logging.getLogger(__name__) def __virtual__(): ''' Only make these states available if a pkg provider has been detected or assigned for this minion ''' return 'pkg.install' in __salt__ def _get_comparison_spec(pkgver): ''' Return a tuple containing the comparison operator and the version. If no comparison operator was passed, the comparison is assumed to be an "equals" comparison, and "==" will be the operator returned. ''' oper, verstr = salt.utils.pkg.split_comparison(pkgver.strip()) if oper in ('=', ''): oper = '==' return oper, verstr def _parse_version_string(version_conditions_string): ''' Returns a list of two-tuples containing (operator, version). ''' result = [] version_conditions_string = version_conditions_string.strip() if not version_conditions_string: return result for version_condition in version_conditions_string.split(','): operator_and_version = _get_comparison_spec(version_condition) result.append(operator_and_version) return result def _fulfills_version_string(installed_versions, version_conditions_string, ignore_epoch=False, allow_updates=False): ''' Returns True if any of the installed versions match the specified version conditions, otherwise returns False. 
installed_versions The installed versions version_conditions_string The string containing all version conditions. E.G. 1.2.3-4 >=1.2.3-4 >=1.2.3-4, <2.3.4-5 >=1.2.3-4, <2.3.4-5, !=1.2.4-1 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. allow_updates : False Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. (Only applicable if only one strict version condition is specified E.G. version: 2.0.6~ubuntu3) ''' version_conditions = _parse_version_string(version_conditions_string) for installed_version in installed_versions: fullfills_all = True for operator, version_string in version_conditions: if allow_updates and len(version_conditions) == 1 and operator == '==': operator = '>=' fullfills_all = fullfills_all and _fulfills_version_spec([installed_version], operator, version_string, ignore_epoch=ignore_epoch) if fullfills_all: return True return False def _find_unpurge_targets(desired, **kwargs): ''' Find packages which are marked to be purged but can't yet be removed because they are dependencies for other installed packages. These are the packages which will need to be 'unpurged' because they are part of pkg.installed states. This really just applies to Debian-based Linuxes. ''' return [ x for x in desired if x in __salt__['pkg.list_pkgs'](purge_desired=True, **kwargs) ] def _find_download_targets(name=None, version=None, pkgs=None, normalize=True, skip_suggestions=False, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.downloaded and discover what packages need to be downloaded. Return a dict of packages to download. 
''' cur_pkgs = __salt__['pkg.list_downloaded'](**kwargs) if pkgs: to_download = _repack_pkgs(pkgs, normalize=normalize) if not to_download: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. See ' 'minion log.'} else: if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_download = {_normalize_name(name): version} else: to_download = {name: version} cver = cur_pkgs.get(name, {}) if name in to_download: # Package already downloaded, no need to download again if cver and version in cver: return {'name': name, 'changes': {}, 'result': True, 'comment': 'Version {0} of package \'{1}\' is already ' 'downloaded'.format(version, name)} # if cver is not an empty string, the package is already downloaded elif cver and version is None: # The package is downloaded return {'name': name, 'changes': {}, 'result': True, 'comment': 'Package {0} is already ' 'downloaded'.format(name)} version_spec = False if not skip_suggestions: try: problems = _preflight_check(to_download, **kwargs) except CommandExecutionError: pass else: comments = [] if problems.get('no_suggest'): comments.append( 'The following package(s) were not found, and no ' 'possible matches were found in the package db: ' '{0}'.format( ', '.join(sorted(problems['no_suggest'])) ) ) if problems.get('suggest'): for pkgname, suggestions in \ six.iteritems(problems['suggest']): comments.append( 'Package \'{0}\' not found (possible matches: ' '{1})'.format(pkgname, ', '.join(suggestions)) ) if comments: if len(comments) > 1: comments.append('') return {'name': name, 'changes': {}, 'result': False, 'comment': '. 
'.join(comments).rstrip()} # Find out which packages will be targeted in the call to pkg.download # Check current downloaded versions against specified versions targets = {} problems = [] for pkgname, pkgver in six.iteritems(to_download): cver = cur_pkgs.get(pkgname, {}) # Package not yet downloaded, so add to targets if not cver: targets[pkgname] = pkgver continue # No version specified but package is already downloaded elif cver and not pkgver: continue version_spec = True try: if not _fulfills_version_string(cver.keys(), pkgver, ignore_epoch=ignore_epoch): targets[pkgname] = pkgver except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already downloaded msg = ( 'All specified packages{0} are already downloaded' .format(' (matching specified versions)' if version_spec else '') ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_advisory_targets(name=None, advisory_ids=None, **kwargs): ''' Inspect the arguments to pkg.patch_installed and discover what advisory patches need to be installed. Return a dict of advisory patches to install. 
''' cur_patches = __salt__['pkg.list_installed_patches'](**kwargs) if advisory_ids: to_download = advisory_ids else: to_download = [name] if cur_patches.get(name, {}): # Advisory patch already installed, no need to install it again return {'name': name, 'changes': {}, 'result': True, 'comment': 'Advisory patch {0} is already ' 'installed'.format(name)} # Find out which advisory patches will be targeted in the call to pkg.install targets = [] for patch_name in to_download: cver = cur_patches.get(patch_name, {}) # Advisory patch not yet installed, so add to targets if not cver: targets.append(patch_name) continue if not targets: # All specified packages are already downloaded msg = ('All specified advisory patches are already installed') return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_remove_targets(name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.removed and discover what packages need to be removed. Return a dict of packages to remove. ''' if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if pkgs: to_remove = _repack_pkgs(pkgs, normalize=normalize) if not to_remove: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. 
See ' 'minion log.'} else: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_remove = {_normalize_name(name): version} version_spec = False # Find out which packages will be targeted in the call to pkg.remove # Check current versions against specified versions targets = [] problems = [] for pkgname, pkgver in six.iteritems(to_remove): # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names origin = bool(re.search('/', pkgname)) if __grains__['os'] == 'FreeBSD' and origin: cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == pkgname] else: cver = cur_pkgs.get(pkgname, []) # Package not installed, no need to remove if not cver: continue # No version specified and pkg is installed elif __salt__['pkg_resource.version_clean'](pkgver) is None: targets.append(pkgname) continue version_spec = True try: if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch): targets.append(pkgname) else: log.debug( 'Current version (%s) did not match desired version ' 'specification (%s), will not remove', cver, pkgver ) except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already absent msg = 'All specified packages{0} are already absent'.format( ' (matching specified versions)' if version_spec else '' ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_install_targets(name=None, version=None, pkgs=None, sources=None, skip_suggestions=False, pkg_verify=False, normalize=True, ignore_epoch=False, reinstall=False, refresh=False, **kwargs): ''' Inspect the arguments to pkg.installed and discover what packages need to be installed. 
Return a dict of desired packages ''' was_refreshed = False if all((pkgs, sources)): return {'name': name, 'changes': {}, 'result': False, 'comment': 'Only one of "pkgs" and "sources" is permitted.'} # dict for packages that fail pkg.verify and their altered files altered_files = {} # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True if salt.utils.platform.is_windows(): # Windows requires a refresh to establish a pkg db if refresh=True, so # add it to the kwargs. 
kwargs['refresh'] = refresh resolve_capabilities = kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__ try: cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) cur_prov = resolve_capabilities and __salt__['pkg.list_provides'](**kwargs) or dict() except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': exc.strerror} if salt.utils.platform.is_windows() and kwargs.pop('refresh', False): # We already refreshed when we called pkg.list_pkgs was_refreshed = True refresh = False if any((pkgs, sources)): if pkgs: desired = _repack_pkgs(pkgs, normalize=normalize) elif sources: desired = __salt__['pkg_resource.pack_sources']( sources, normalize=normalize, ) if not desired: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted \'{0}\' parameter. See ' 'minion log.'.format('pkgs' if pkgs else 'sources')} to_unpurge = _find_unpurge_targets(desired, **kwargs) else: if salt.utils.platform.is_windows(): pkginfo = _get_package_info(name, saltenv=kwargs['saltenv']) if not pkginfo: return {'name': name, 'changes': {}, 'result': False, 'comment': 'Package {0} not found in the ' 'repository.'.format(name)} if version is None: version = _get_latest_pkg_version(pkginfo) if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) desired = {_normalize_name(name): version} else: desired = {name: version} to_unpurge = _find_unpurge_targets(desired, **kwargs) # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names origin = bool(re.search('/', name)) if __grains__['os'] == 'FreeBSD' and origin: cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == name] else: cver = cur_pkgs.get(name, []) if name not in to_unpurge: if version and version in cver \ and not reinstall \ and not pkg_verify: # The package is installed and is the correct version return {'name': name, 'changes': {}, 'result': True, 
'comment': 'Version {0} of package \'{1}\' is already ' 'installed'.format(version, name)} # if cver is not an empty string, the package is already installed elif cver and version is None \ and not reinstall \ and not pkg_verify: # The package is installed return {'name': name, 'changes': {}, 'result': True, 'comment': 'Package {0} is already ' 'installed'.format(name)} version_spec = False if not sources: # Check for alternate package names if strict processing is not # enforced. Takes extra time. Disable for improved performance if not skip_suggestions: # Perform platform-specific pre-flight checks not_installed = dict([ (name, version) for name, version in desired.items() if not (name in cur_pkgs and (version is None or _fulfills_version_string(cur_pkgs[name], version))) ]) if not_installed: try: problems = _preflight_check(not_installed, **kwargs) except CommandExecutionError: pass else: comments = [] if problems.get('no_suggest'): comments.append( 'The following package(s) were not found, and no ' 'possible matches were found in the package db: ' '{0}'.format( ', '.join(sorted(problems['no_suggest'])) ) ) if problems.get('suggest'): for pkgname, suggestions in \ six.iteritems(problems['suggest']): comments.append( 'Package \'{0}\' not found (possible matches: ' '{1})'.format(pkgname, ', '.join(suggestions)) ) if comments: if len(comments) > 1: comments.append('') return {'name': name, 'changes': {}, 'result': False, 'comment': '. '.join(comments).rstrip()} # Resolve the latest package version for any packages with "latest" in the # package version wants_latest = [] \ if sources \ else [x for x, y in six.iteritems(desired) if y == 'latest'] if wants_latest: resolved_latest = __salt__['pkg.latest_version'](*wants_latest, refresh=refresh, **kwargs) if len(wants_latest) == 1: resolved_latest = {wants_latest[0]: resolved_latest} if refresh: was_refreshed = True refresh = False # pkg.latest_version returns an empty string when the package is # up-to-date. 
So check the currently-installed packages. If found, the # resolved latest version will be the currently installed one from # cur_pkgs. If not found, then the package doesn't exist and the # resolved latest version will be None. for key in resolved_latest: if not resolved_latest[key]: if key in cur_pkgs: resolved_latest[key] = cur_pkgs[key][-1] else: resolved_latest[key] = None # Update the desired versions with the ones we resolved desired.update(resolved_latest) # Find out which packages will be targeted in the call to pkg.install targets = {} to_reinstall = {} problems = [] warnings = [] failed_verify = False for package_name, version_string in six.iteritems(desired): cver = cur_pkgs.get(package_name, []) if resolve_capabilities and not cver and package_name in cur_prov: cver = cur_pkgs.get(cur_prov.get(package_name)[0], []) # Package not yet installed, so add to targets if not cver: targets[package_name] = version_string continue if sources: if reinstall: to_reinstall[package_name] = version_string continue elif 'lowpkg.bin_pkg_info' not in __salt__: continue # Metadata parser is available, cache the file and derive the # package's name and version err = 'Unable to cache {0}: {1}' try: cached_path = __salt__['cp.cache_file'](version_string, saltenv=kwargs['saltenv']) except CommandExecutionError as exc: problems.append(err.format(version_string, exc)) continue if not cached_path: problems.append(err.format(version_string, 'file not found')) continue elif not os.path.exists(cached_path): problems.append('{0} does not exist on minion'.format(version_string)) continue source_info = __salt__['lowpkg.bin_pkg_info'](cached_path) if source_info is None: warnings.append('Failed to parse metadata for {0}'.format(version_string)) continue else: verstr = source_info['version'] else: verstr = version_string if reinstall: to_reinstall[package_name] = version_string continue if not __salt__['pkg_resource.check_extra_requirements'](package_name, version_string): 
targets[package_name] = version_string continue # No version specified and pkg is installed elif __salt__['pkg_resource.version_clean'](version_string) is None: if (not reinstall) and pkg_verify: try: verify_result = __salt__['pkg.verify']( package_name, ignore_types=ignore_types, verify_options=verify_options, **kwargs ) except (CommandExecutionError, SaltInvocationError) as exc: failed_verify = exc.strerror continue if verify_result: to_reinstall[package_name] = version_string altered_files[package_name] = verify_result continue version_fulfilled = False allow_updates = bool(not sources and kwargs.get('allow_updates')) try: version_fulfilled = _fulfills_version_string(cver, verstr, ignore_epoch=ignore_epoch, allow_updates=allow_updates) except CommandExecutionError as exc: problems.append(exc.strerror) continue # Compare desired version against installed version. version_spec = True if not version_fulfilled: if reinstall: to_reinstall[package_name] = version_string else: version_conditions = _parse_version_string(version_string) if pkg_verify and any(oper == '==' for oper, version in version_conditions): try: verify_result = __salt__['pkg.verify']( package_name, ignore_types=ignore_types, verify_options=verify_options, **kwargs ) except (CommandExecutionError, SaltInvocationError) as exc: failed_verify = exc.strerror continue if verify_result: to_reinstall[package_name] = version_string altered_files[package_name] = verify_result else: log.debug( 'Current version (%s) did not match desired version ' 'specification (%s), adding to installation targets', cver, version_string ) targets[package_name] = version_string if failed_verify: problems.append(failed_verify) if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not any((targets, to_unpurge, to_reinstall)): # All specified packages are installed msg = 'All specified packages are already installed{0}' msg = msg.format( ' and are at the desired version' if 
            version_spec and not sources else ''
        )
        ret = {'name': name,
               'changes': {},
               'result': True,
               'comment': msg}
        if warnings:
            ret.setdefault('warnings', []).extend(warnings)
        return ret

    return (desired, targets, to_unpurge, to_reinstall, altered_files,
            warnings, was_refreshed)


def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None):
    '''
    Determine whether or not the installed packages match what was requested in
    the SLS file.

    desired: dict mapping package name -> desired version string (or None)
    new_pkgs: dict of currently-installed packages, as returned by
        ``pkg.list_pkgs`` (callers pass ``versions_as_list=True``, so values
        are lists of version strings)
    ignore_epoch: disregard the epoch when comparing versions
    new_caps: optional mapping of capability/alias name -> [real package name]

    Returns a 2-tuple ``(ok, failed)`` of package-name lists.
    '''
    ok = []
    failed = []
    if not new_caps:
        new_caps = dict()
    for pkgname, pkgver in desired.items():
        # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names.
        # Homebrew for Mac OSX does something similar with tap names
        # prefixing package names, separated with a slash.
        has_origin = '/' in pkgname

        if __grains__['os'] == 'FreeBSD' and has_origin:
            cver = [k for k, v in six.iteritems(new_pkgs) if v['origin'] == pkgname]
        elif __grains__['os'] == 'MacOS' and has_origin:
            cver = new_pkgs.get(pkgname, new_pkgs.get(pkgname.split('/')[-1]))
        elif __grains__['os'] == 'OpenBSD':
            # OpenBSD package names may carry a flavor after '%'
            cver = new_pkgs.get(pkgname.split('%')[0])
        elif __grains__['os_family'] == 'Debian':
            # Debian package names may carry a pinned version after '='
            cver = new_pkgs.get(pkgname.split('=')[0])
        else:
            cver = new_pkgs.get(pkgname)
            # Fall back to a capability/alias lookup when no direct match
            if not cver and pkgname in new_caps:
                cver = new_pkgs.get(new_caps.get(pkgname)[0])

        if not cver:
            # Not installed at all
            failed.append(pkgname)
            continue
        elif pkgver == 'latest':
            # 'latest' is satisfied by any installed version at this point
            ok.append(pkgname)
            continue
        elif not __salt__['pkg_resource.version_clean'](pkgver):
            # No real version requested (None/empty), presence is enough
            ok.append(pkgname)
            continue
        elif pkgver.endswith("*") and cver[0].startswith(pkgver[:-1]):
            # Trailing-wildcard version spec matched by prefix
            ok.append(pkgname)
            continue
        if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch):
            ok.append(pkgname)
        else:
            failed.append(pkgname)
    return ok, failed


def _get_desired_pkg(name, desired):
    '''
    Helper function that retrieves and nicely formats the desired pkg (and
    version if specified) so that helpful information can be printed in the
    comment for the state.
    '''
    # Only prepend '=' when a plain version (no comparison operator) was given
    if not desired[name] or desired[name].startswith(('<', '>', '=')):
        oper = ''
    else:
        oper = '='
    return '{0}{1}{2}'.format(name, oper,
                              '' if not desired[name] else desired[name])


def _preflight_check(desired, fromrepo, **kwargs):
    '''
    Perform platform-specific checks on desired packages

    Returns ``{}`` when the platform has no ``pkg.check_db``; otherwise a dict
    with 'suggest' (name -> suggested alternatives) and 'no_suggest' (names
    not found with no alternatives).
    '''
    if 'pkg.check_db' not in __salt__:
        return {}
    ret = {'suggest': {}, 'no_suggest': []}
    pkginfo = __salt__['pkg.check_db'](
        *list(desired.keys()), fromrepo=fromrepo, **kwargs
    )
    for pkgname in pkginfo:
        if pkginfo[pkgname]['found'] is False:
            # Package was not found in the repo DB
            if pkginfo[pkgname]['suggestions']:
                ret['suggest'][pkgname] = pkginfo[pkgname]['suggestions']
            else:
                ret['no_suggest'].append(pkgname)
    return ret


def _nested_output(obj):
    '''
    Serialize obj and format for output
    '''
    # The nested outputter reads its configuration from a module-level
    # __opts__; inject this module's opts before rendering.
    nested.__opts__ = __opts__
    ret = nested.output(obj).rstrip()
    return ret


def _resolve_capabilities(pkgs, refresh=False, **kwargs):
    '''
    Resolve capabilities in ``pkgs`` and exchange them with real package
    names, when the result is distinct.

    This feature can be turned on by setting the parameter
    ``resolve_capabilities`` to True.

    Return the input dictionary with replaced capability names and as second
    return value a bool which says whether a refresh needs to be run.

    In case ``resolve_capabilities`` is False (disabled) or not supported by
    the implementation the input is returned unchanged.
    '''
    if not pkgs or 'pkg.resolve_capabilities' not in __salt__:
        return pkgs, refresh

    ret = __salt__['pkg.resolve_capabilities'](pkgs, refresh=refresh, **kwargs)
    # pkg.resolve_capabilities performs any needed refresh itself, so signal
    # to the caller that no further refresh is required.
    return ret, False


def installed(
        name,
        version=None,
        refresh=None,
        fromrepo=None,
        skip_verify=False,
        skip_suggestions=False,
        pkgs=None,
        sources=None,
        allow_updates=False,
        pkg_verify=False,
        normalize=True,
        ignore_epoch=False,
        reinstall=False,
        update_holds=False,
        bypass_file=None,
        bypass_file_contains=None,
        **kwargs):
    '''
    Ensure that the package is installed, and that it is the correct version
    (if specified).

    :param str name:
        The name of the package to be installed.
This parameter is ignored if either "pkgs" or "sources" is used. Additionally, please note that this option can only be used to install packages from a software repository. To install a package file manually, use the "sources" option detailed below. :param str version: Install a specific version of a package. This option is ignored if "sources" is used. Currently, this option is supported for the following pkg providers: :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`pkgin <salt.modules.pkgin>`, :mod:`win_pkg <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`. The version number includes the release designation where applicable, to allow Salt to target a specific release of a given version. When in doubt, using the ``pkg.latest_version`` function for an uninstalled package will tell you the version available. .. code-block:: bash # salt myminion pkg.latest_version vim-enhanced myminion: 2:7.4.160-1.el7 .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the ``pkg.latest_version`` output above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. 
Also, while this function is not yet implemented for all pkg frontends, :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` will show all versions available in the various repositories for a given package, irrespective of whether or not it is installed. .. code-block:: bash # salt myminion pkg.list_repo_pkgs bash myminion: ---------- bash: - 4.2.46-21.el7_3 - 4.2.46-20.el7_2 This function was first added for :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` in 2014.1.0, and was expanded to :py:func:`Debian/Ubuntu <salt.modules.aptpkg.list_repo_pkgs>` and :py:func:`Arch Linux <salt.modules.pacman.list_repo_pkgs>`-based distros in the 2017.7.0 release. The version strings returned by either of these functions can be used as version specifiers in pkg states. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.installed: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 If the version given is the string ``latest``, the latest available package version will be installed à la ``pkg.latest``. **WILDCARD VERSIONS** As of the 2017.7.0 release, this state now supports wildcards in package versions for SUSE SLES/Leap/Tumbleweed, Debian/Ubuntu, RHEL/CentOS, Arch Linux, and their derivatives. Using wildcards can be useful for packages where the release name is built into the version in some way, such as for RHEL/CentOS which typically has version numbers like ``1.2.34-5.el7``. An example of the usage for this would be: .. code-block:: yaml mypkg: pkg.installed: - version: '1.2.34*' Keep in mind that using wildcard versions will result in a slower state run since Salt must gather the available versions of the specified packages and figure out which of them match the specified wildcard expression. :param bool refresh: This parameter controls whether or not the package repo database is updated prior to installing the requested package(s). 
If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before installing. If ``False``, the package database will *not* be refreshed before installing. If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - cache_valid_time: 300 - allow_updates: True - hold: False In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param str fromrepo: Specify a repository from which to install .. note:: Distros which use APT (Debian, Ubuntu, etc.) do not have a concept of repositories, in the same way as YUM-based distros do. When a source is added, it is assigned to a given release. Consider the following source configuration: .. code-block:: text deb http://ppa.launchpad.net/saltstack/salt/ubuntu precise main The packages provided by this source would be made available via the ``precise`` release, therefore ``fromrepo`` would need to be set to ``precise`` for Salt to install the package from this source. 
Having multiple sources in the same release may result in the default install candidate being newer than what is desired. If this is the case, the desired version must be specified using the ``version`` parameter. If the ``pkgs`` parameter is being used to install multiple packages in the same state, then instead of using ``version``, use the method of version specification described in the **Multiple Package Installation Options** section below. Running the shell command ``apt-cache policy pkgname`` on a minion can help elucidate the APT configuration and aid in properly configuring states: .. code-block:: bash root@saltmaster:~# salt ubuntu01 cmd.run 'apt-cache policy ffmpeg' ubuntu01: ffmpeg: Installed: (none) Candidate: 7:0.10.11-1~precise1 Version table: 7:0.10.11-1~precise1 0 500 http://ppa.launchpad.net/jon-severinsson/ffmpeg/ubuntu/ precise/main amd64 Packages 4:0.8.10-0ubuntu0.12.04.1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise-updates/main amd64 Packages 500 http://security.ubuntu.com/ubuntu/ precise-security/main amd64 Packages 4:0.8.1-0ubuntu1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise/main amd64 Packages The release is located directly after the source's URL. The actual release name is the part before the slash, so to install version **4:0.8.10-0ubuntu0.12.04.1** either ``precise-updates`` or ``precise-security`` could be used for the ``fromrepo`` value. :param bool skip_verify: Skip the GPG verification check for the package to be installed :param bool skip_suggestions: Force strict package naming. Disables lookup of package alternatives. .. versionadded:: 2014.1.1 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 :param bool allow_updates: Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). 
This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - allow_updates: True - hold: False :param bool pkg_verify: .. versionadded:: 2014.7.0 For requested packages that are already installed and would not be targeted for upgrade or downgrade, use pkg.verify to determine if any of the files installed by the package have been altered. If files have been altered, the reinstall option of pkg.install is used to force a reinstall. Types to ignore can be passed to pkg.verify. Additionally, ``verify_options`` can be used to modify further the behavior of pkg.verify. See examples below. Currently, this option is supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>`. Examples: .. code-block:: yaml httpd: pkg.installed: - version: 2.2.15-30.el6.centos - pkg_verify: True .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc - verify_options: - nodeps - nofiledigest :param list ignore_types: List of types to ignore when verifying the package .. versionadded:: 2014.7.0 :param list verify_options: List of additional options to pass when verifying the package. These options will be added to the ``rpm -V`` command, prepended with ``--`` (for example, when ``nodeps`` is passed in this option, ``rpm -V`` will be run with ``--nodeps``). .. versionadded:: 2016.11.0 :param bool normalize: Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml gpfs.gplbin-2.6.32-279.31.1.el6.x86_64: pkg.installed: - normalize: False :param bool ignore_epoch: When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.installed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the package would be installed, but the state would report as failed because the actual installed version would be ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 | **MULTIPLE PACKAGE INSTALLATION OPTIONS: (not supported in pkgng)** :param list pkgs: A list of packages to install from a software repository. All packages listed under ``pkgs`` will be installed via a single command. .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar - baz - hold: True ``NOTE:`` For :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`winrepo <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`, version numbers can be specified in the ``pkgs`` argument. For example: .. 
code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz Additionally, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`zypper <salt.modules.zypper>`, :mod:`yum/dnf <salt.modules.yumpkg>`, and :mod:`apt <salt.modules.aptpkg>` support the ``<``, ``<=``, ``>=``, and ``>`` operators for more control over what versions will be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: '>=1.2.3-4' - baz ``NOTE:`` When using comparison operators, the expression must be enclosed in quotes to avoid a YAML render error. With :mod:`ebuild <salt.modules.ebuild>` is also possible to specify a use flag list and/or if the given packages should be in package.accept_keywords file and/or the overlay from which you want the package to be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo: '~' - bar: '~>=1.2:slot::overlay[use,-otheruse]' - baz :param list sources: A list of packages to install, along with the source URI or local path from which to install each package. In the example below, ``foo``, ``bar``, ``baz``, etc. refer to the name of the package, as it would appear in the output of the ``pkg.version`` or ``pkg.list_pkgs`` salt CLI commands. .. code-block:: yaml mypkgs: pkg.installed: - sources: - foo: salt://rpms/foo.rpm - bar: http://somesite.org/bar.rpm - baz: ftp://someothersite.org/baz.rpm - qux: /minion/path/to/qux.rpm **PLATFORM-SPECIFIC ARGUMENTS** These are specific to each OS. If it does not apply to the execution module for your OS, it is ignored. :param bool hold: Force the package to be held at the current installed version. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2014.7.0 :param bool update_holds: If ``True``, and this function would update the package version, any packages which are being held will be temporarily unheld so that they can be updated. 
Otherwise, if this function attempts to update a held package, the held package(s) will be skipped and the state will fail. By default, this parameter is set to ``False``. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2016.11.0 :param list names: A list of packages to install from a software repository. Each package will be installed individually by the package manager. .. warning:: Unlike ``pkgs``, the ``names`` parameter cannot specify a version. In addition, it makes a separate call to the package management frontend to install each package, whereas ``pkgs`` makes just a single call. It is therefore recommended to use ``pkgs`` instead of ``names`` to install multiple packages, both for the additional features and the performance improvement that it brings. :param bool install_recommends: Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - install_recommends: False :param bool only_upgrade: Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. :param bool report_reboot_exit_codes: If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. 
code-block:: yaml ms vcpp installed: pkg.installed: - name: ms-vcpp - version: 10.0.40219 - report_reboot_exit_codes: False :param str bypass_file: If you wish to bypass the full package validation process, you can specify a file related to the installed package as a way to validate the package has already been installed. A good example would be a config file that is deployed with the package. Another bypass_file could be ``/run/salt-minion.pid``. .. code-block:: yaml install_ntp: pkg.installed: - name: ntp - bypass_file: /etc/ntp.conf The use case for this feature is when running salt at significant scale. Each state that has a requisite for a ``pkg.installed`` will have salt querying the package manager of the system. Compared to simple diff checks, querying the package manager is a lengthy process. This feature is an attempt to reduce the run time of states. If only a config change is being made but you wish to keep all of the self resolving requisites this bypasses the lengthy cost of the package manager. The assumption is that if this file is present, the package should already be installed. :param str bypass_file_contains: This option can only be used in conjunction with the ``bypass_file`` option. It is to provide a second layer of validation before bypassing the ``pkg.installed`` process. .. code-block:: yaml install_ntp: pkg.installed: - name: ntp - bypass_file: /etc/ntp.conf - bypass_file_contains: version-20181218 This will have salt check to see if the file contains the specified string. If the value is found, the ``pkg.installed`` process will be bypassed under the assumption that two pieces of validation have passed and the package is already installed. .. warning:: Do not try and use ``{{ salt['pkg.version']('ntp') }}`` in a jinja template as part of your bypass_file_contains match. This will trigger a ``pkg.version`` lookup with the package manager and negate any time saved by trying to use the bypass feature.
:return: A dictionary containing the state of the software installation :rtype dict: .. note:: The ``pkg.installed`` state supports the usage of ``reload_modules``. This functionality allows you to force Salt to reload all modules. In many cases, Salt is clever enough to transparently reload the modules. For example, if you install a package, Salt reloads modules because some other module or state might require the package which was installed. However, there are some edge cases where this may not be the case, which is what ``reload_modules`` is meant to resolve. You should only use ``reload_modules`` if your ``pkg.installed`` does some sort of installation where if you do not reload the modules future items in your state which rely on the software being installed will fail. Please see the :ref:`Reloading Modules <reloading-modules>` documentation for more information. ''' if not pkgs and isinstance(pkgs, list): return {'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided'} # If just a name (and optionally a version) is passed, just pack them into # the pkgs argument. 
if name and not any((pkgs, sources)): if version: pkgs = [{name: version}] version = None else: pkgs = [name] kwargs['saltenv'] = __env__ refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if bypass_file is not None and bypass_file_contains is not None: if os.path.isfile(bypass_file): with salt.utils.fopen(bypass_file) as bypass_file_open: if bypass_file_contains in bypass_file_open.read(): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as {} was present in {}'.format(bypass_file_contains, bypass_file)} if bypass_file is not None and bypass_file_contains is None: if os.path.isfile(bypass_file): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as bypass_file {} was present'.format(bypass_file)} # check if capabilities should be checked and modify the requested packages # accordingly. if pkgs: pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) if not isinstance(pkg_verify, list): pkg_verify = pkg_verify is True if (pkg_verify or isinstance(pkg_verify, list)) \ and 'pkg.verify' not in __salt__: return {'name': name, 'changes': {}, 'result': False, 'comment': 'pkg.verify not implemented'} if not isinstance(version, six.string_types) and version is not None: version = six.text_type(version) kwargs['allow_updates'] = allow_updates result = _find_install_targets(name, version, pkgs, sources, fromrepo=fromrepo, skip_suggestions=skip_suggestions, pkg_verify=pkg_verify, normalize=normalize, ignore_epoch=ignore_epoch, reinstall=reinstall, refresh=refresh, **kwargs) try: (desired, targets, to_unpurge, to_reinstall, altered_files, warnings, was_refreshed) = result if was_refreshed: refresh = False except ValueError: # _find_install_targets() found no targets or encountered an error # check that the hold function is available if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( 
name=name, pkgs=pkgs, sources=sources ) except (CommandExecutionError, SaltInvocationError) as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': six.text_type(exc)} if 'result' in hold_ret and not hold_ret['result']: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] for i in modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] result['changes'][i['name']] = i['changes'] for i in not_modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] for i in failed_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] return result if to_unpurge and 'lowpkg.unpurge' not in __salt__: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'lowpkg.unpurge not implemented'} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret # Remove any targets not returned by _find_install_targets if pkgs: pkgs = [dict([(x, y)]) for x, y in six.iteritems(targets)] pkgs.extend([dict([(x, y)]) for x, y in six.iteritems(to_reinstall)]) elif sources: oldsources = sources sources = [x for x in oldsources if next(iter(list(x.keys()))) in targets] sources.extend([x for x in oldsources if next(iter(list(x.keys()))) in to_reinstall]) comment = [] if __opts__['test']: if targets: if sources: summary = ', '.join(targets) else: summary = ', '.join([_get_desired_pkg(x, targets) for x in targets]) comment.append('The following packages would be ' 'installed/updated: {0}'.format(summary)) if to_unpurge: comment.append( 'The following packages would have their selection 
status ' 'changed from \'purge\' to \'install\': {0}' .format(', '.join(to_unpurge)) ) if to_reinstall: # Add a comment for each package in to_reinstall with its # pkg.verify output if reinstall: reinstall_targets = [] for reinstall_pkg in to_reinstall: if sources: reinstall_targets.append(reinstall_pkg) else: reinstall_targets.append( _get_desired_pkg(reinstall_pkg, to_reinstall) ) msg = 'The following packages would be reinstalled: ' msg += ', '.join(reinstall_targets) comment.append(msg) else: for reinstall_pkg in to_reinstall: if sources: pkgstr = reinstall_pkg else: pkgstr = _get_desired_pkg(reinstall_pkg, to_reinstall) comment.append( 'Package \'{0}\' would be reinstalled because the ' 'following files have been altered:'.format(pkgstr) ) comment.append( _nested_output(altered_files[reinstall_pkg]) ) ret = {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret changes = {'installed': {}} modified_hold = None not_modified_hold = None failed_hold = None if targets or to_reinstall: try: pkg_ret = __salt__['pkg.install'](name=None, refresh=refresh, version=version, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=pkgs, sources=sources, reinstall=bool(to_reinstall), normalize=normalize, update_holds=update_holds, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while installing ' 'package(s): {0}'.format(exc)) if warnings: ret.setdefault('warnings', []).extend(warnings) return ret if refresh: refresh = False if isinstance(pkg_ret, dict): changes['installed'].update(pkg_ret) elif isinstance(pkg_ret, six.string_types): comment.append(pkg_ret) # Code below will be looking for a dictionary. If this is a string # it means that there was an exception raised and that no packages # changed, so now that we have added this error to the comments we # set this to an empty dictionary so that the code below which # checks reinstall targets works. pkg_ret = {} if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( name=name, pkgs=desired ) except (CommandExecutionError, SaltInvocationError) as exc: comment.append(six.text_type(exc)) ret = {'name': name, 'changes': changes, 'result': False, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: if 'result' in hold_ret and not hold_ret['result']: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] if to_unpurge: changes['purge_desired'] = __salt__['lowpkg.unpurge'](*to_unpurge) # Analyze pkg.install results for packages in targets if sources: modified = [x for x in changes['installed'] if x in targets] not_modified = [x for x in desired if x not in targets 
and x not in to_reinstall] failed = [x for x in targets if x not in modified] else: if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True new_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__: new_caps = __salt__['pkg.list_provides'](**kwargs) else: new_caps = {} ok, failed = _verify_install(desired, new_pkgs, ignore_epoch=ignore_epoch, new_caps=new_caps) modified = [x for x in ok if x in targets] not_modified = [x for x in ok if x not in targets and x not in to_reinstall] failed = [x for x in failed if x in targets] # If there was nothing unpurged, just set the changes dict to the contents # of changes['installed']. if not changes.get('purge_desired'): changes = changes['installed'] if modified: if sources: summary = ', '.join(modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in modified]) if len(summary) < 20: comment.append('The following packages were installed/updated: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} installed/updated.'.format( len(modified), 's' if len(modified) > 1 else '', 'were' if len(modified) > 1 else 'was' ) ) if modified_hold: for i in modified_hold: change_name = i['name'] if change_name in changes: comment.append(i['comment']) if changes[change_name]['new']: changes[change_name]['new'] += '\n' changes[change_name]['new'] += '{0}'.format(i['changes']['new']) if changes[change_name]['old']: changes[change_name]['old'] += '\n' changes[change_name]['old'] += '{0}'.format(i['changes']['old']) else: comment.append(i['comment']) changes[change_name] = {} changes[change_name]['new'] = '{0}'.format(i['changes']['new']) # Any requested packages that were not targeted for install or reinstall if not_modified: if sources: summary = ', '.join(not_modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in not_modified]) if len(not_modified) <= 20: comment.append('The following 
packages were already installed: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} already installed'.format( len(not_modified), 's' if len(not_modified) > 1 else '', 'were' if len(not_modified) > 1 else 'was' ) ) if not_modified_hold: for i in not_modified_hold: comment.append(i['comment']) result = True if failed: if sources: summary = ', '.join(failed) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in failed]) comment.insert(0, 'The following packages failed to ' 'install/update: {0}'.format(summary)) result = False if failed_hold: for i in failed_hold: comment.append(i['comment']) result = False # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] # Rerun pkg.verify for packages in to_reinstall to determine failed modified = [] failed = [] for reinstall_pkg in to_reinstall: if reinstall: if reinstall_pkg in pkg_ret: modified.append(reinstall_pkg) else: failed.append(reinstall_pkg) elif pkg_verify: # No need to wrap this in a try/except because we would already # have caught invalid arguments earlier. 
verify_result = __salt__['pkg.verify'](reinstall_pkg, ignore_types=ignore_types, verify_options=verify_options, **kwargs) if verify_result: failed.append(reinstall_pkg) altered_files[reinstall_pkg] = verify_result else: modified.append(reinstall_pkg) if modified: # Add a comment for each package in modified with its pkg.verify output for modified_pkg in modified: if sources: pkgstr = modified_pkg else: pkgstr = _get_desired_pkg(modified_pkg, desired) msg = 'Package {0} was reinstalled.'.format(pkgstr) if modified_pkg in altered_files: msg += ' The following files were remediated:' comment.append(msg) comment.append(_nested_output(altered_files[modified_pkg])) else: comment.append(msg) if failed: # Add a comment for each package in failed with its pkg.verify output for failed_pkg in failed: if sources: pkgstr = failed_pkg else: pkgstr = _get_desired_pkg(failed_pkg, desired) msg = ('Reinstall was not successful for package {0}.' .format(pkgstr)) if failed_pkg in altered_files: msg += ' The following files could not be remediated:' comment.append(msg) comment.append(_nested_output(altered_files[failed_pkg])) else: comment.append(msg) result = False ret = {'name': name, 'changes': changes, 'result': result, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret def downloaded(name, version=None, pkgs=None, fromrepo=None, ignore_epoch=None, **kwargs): ''' .. versionadded:: 2017.7.0 Ensure that the package is downloaded, and that it is the correct version (if specified). Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` :param str name: The name of the package to be downloaded. This parameter is ignored if either "pkgs" is used. Additionally, please note that this option can only be used to download packages from a software repository. :param str version: Download a specific version of a package. .. 
important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.downloaded: - version: 2:7.4.160-1.el7 An **ignore_epoch** argument has been added to which causes the epoch to be disregarded when the state checks to see if the desired version was installed. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.downloaded: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: yaml zsh: pkg.downloaded: - version: 5.0.5-4.63 - fromrepo: "myrepository" ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} if 'pkg.list_downloaded' not in __salt__: ret['result'] = False ret['comment'] = 'The pkg.downloaded state is not available on ' \ 'this platform' return ret if not pkgs and isinstance(pkgs, list): ret['result'] = True ret['comment'] = 'No packages to download provided' return ret # If just a name (and optionally a version) is passed, just pack them into # the pkgs argument. if name and not pkgs: if version: pkgs = [{name: version}] version = None else: pkgs = [name] # It doesn't make sense here to received 'downloadonly' as kwargs # as we're explicitly passing 'downloadonly=True' to execution module. 
if 'downloadonly' in kwargs: del kwargs['downloadonly'] pkgs, _refresh = _resolve_capabilities(pkgs, **kwargs) # Only downloading not yet downloaded packages targets = _find_download_targets(name, version, pkgs, fromrepo=fromrepo, ignore_epoch=ignore_epoch, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, dict): ret['result'] = False ret['comment'] = 'An error was encountered while checking targets: ' \ '{0}'.format(targets) return ret if __opts__['test']: summary = ', '.join(targets) ret['comment'] = 'The following packages would be ' \ 'downloaded: {0}'.format(summary) return ret try: pkg_ret = __salt__['pkg.install'](name=name, pkgs=pkgs, version=version, downloadonly=True, fromrepo=fromrepo, ignore_epoch=ignore_epoch, **kwargs) ret['result'] = True ret['changes'].update(pkg_ret) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = 'An error was encountered while downloading ' \ 'package(s): {0}'.format(exc) return ret new_pkgs = __salt__['pkg.list_downloaded'](**kwargs) ok, failed = _verify_install(targets, new_pkgs, ignore_epoch=ignore_epoch) if failed: summary = ', '.join([_get_desired_pkg(x, targets) for x in failed]) ret['result'] = False ret['comment'] = 'The following packages failed to ' \ 'download: {0}'.format(summary) if not ret['changes'] and not ret['comment']: ret['result'] = True ret['comment'] = 'Packages are already downloaded: ' \ '{0}'.format(', '.join(targets)) return ret def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs): ''' .. versionadded:: 2017.7.0 Ensure that packages related to certain advisory ids are installed. 
Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` CLI Example: .. code-block:: yaml issue-foo-fixed: pkg.patch_installed: - advisory_ids: - SUSE-SLE-SERVER-12-SP2-2017-185 - SUSE-SLE-SERVER-12-SP2-2017-150 - SUSE-SLE-SERVER-12-SP2-2017-120 ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} if 'pkg.list_patches' not in __salt__: ret['result'] = False ret['comment'] = 'The pkg.patch_installed state is not available on ' \ 'this platform' return ret if not advisory_ids and isinstance(advisory_ids, list): ret['result'] = True ret['comment'] = 'No advisory ids provided' return ret # Only downloading not yet downloaded packages targets = _find_advisory_targets(name, advisory_ids, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, list): ret['result'] = False ret['comment'] = 'An error was encountered while checking targets: ' \ '{0}'.format(targets) return ret if __opts__['test']: summary = ', '.join(targets) ret['comment'] = 'The following advisory patches would be ' \ 'downloaded: {0}'.format(summary) return ret try: pkg_ret = __salt__['pkg.install'](name=name, advisory_ids=advisory_ids, downloadonly=downloadonly, **kwargs) ret['result'] = True ret['changes'].update(pkg_ret) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while downloading ' 'package(s): {0}'.format(exc)) return ret if not ret['changes'] and not ret['comment']: status = 'downloaded' if downloadonly else 'installed' ret['result'] = True ret['comment'] = ('Advisory patch is not needed or related packages ' 'are already {0}'.format(status)) return ret def patch_downloaded(name, advisory_ids=None, **kwargs): ''' .. versionadded:: 2017.7.0 Ensure that packages related to certain advisory ids are downloaded. Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` CLI Example: .. code-block:: yaml preparing-to-fix-issues: pkg.patch_downloaded: - advisory_ids: - SUSE-SLE-SERVER-12-SP2-2017-185 - SUSE-SLE-SERVER-12-SP2-2017-150 - SUSE-SLE-SERVER-12-SP2-2017-120 ''' if 'pkg.list_patches' not in __salt__: return {'name': name, 'result': False, 'changes': {}, 'comment': 'The pkg.patch_downloaded state is not available on ' 'this platform'} # It doesn't make sense here to received 'downloadonly' as kwargs # as we're explicitly passing 'downloadonly=True' to execution module. if 'downloadonly' in kwargs: del kwargs['downloadonly'] return patch_installed(name=name, advisory_ids=advisory_ids, downloadonly=True, **kwargs) def latest( name, refresh=None, fromrepo=None, skip_verify=False, pkgs=None, watch_flags=True, **kwargs): ''' Ensure that the named package is installed and the latest available package. If the package can be updated, this state function will update the package. Generally it is better for the :mod:`installed <salt.states.pkg.installed>` function to be used, as :mod:`latest <salt.states.pkg.latest>` will update the package whenever a new package is available. name The name of the package to maintain at the latest available version. This parameter is ignored if "pkgs" is used. 
fromrepo Specify a repository from which to install skip_verify Skip the GPG verification check for the package to be installed refresh This parameter controls whether or not the package repo database is updated prior to checking for the latest available version of the requested packages. If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before checking for the latest available version of the requested packages. If ``False``, the package database will *not* be refreshed before checking. If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.latest: - refresh: True - cache_valid_time: 300 In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 Multiple Package Installation Options: (Not yet supported for: FreeBSD, OpenBSD, MacOS, and Solaris pkgutil) pkgs A list of packages to maintain at the latest available version. .. 
code-block:: yaml mypkgs: pkg.latest: - pkgs: - foo - bar - baz install_recommends Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - install_recommends: False only_upgrade Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. report_reboot_exit_codes If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. code-block:: yaml ms vcpp installed: pkg.latest: - name: ms-vcpp - report_reboot_exit_codes: False ''' refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if kwargs.get('sources'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'The "sources" parameter is not supported.'} elif pkgs: desired_pkgs = list(_repack_pkgs(pkgs).keys()) if not desired_pkgs: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted "pkgs" parameter. See ' 'minion log.'} else: if not pkgs and isinstance(pkgs, list): return { 'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided' } else: desired_pkgs = [name] kwargs['saltenv'] = __env__ # check if capabilities should be checked and modify the requested packages # accordingly. 
desired_pkgs, refresh = _resolve_capabilities(desired_pkgs, refresh=refresh, **kwargs) try: avail = __salt__['pkg.latest_version'](*desired_pkgs, fromrepo=fromrepo, refresh=refresh, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking the ' 'newest available version of package(s): {0}' .format(exc)} try: cur = __salt__['pkg.version'](*desired_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': exc.strerror} # Repack the cur/avail data if only a single package is being checked if isinstance(cur, six.string_types): cur = {desired_pkgs[0]: cur} if isinstance(avail, six.string_types): avail = {desired_pkgs[0]: avail} targets = {} problems = [] for pkg in desired_pkgs: if not avail.get(pkg): # Package either a) is up-to-date, or b) does not exist if not cur.get(pkg): # Package does not exist msg = 'No information found for \'{0}\'.'.format(pkg) log.error(msg) problems.append(msg) elif watch_flags \ and __grains__.get('os_family') == 'Gentoo' \ and __salt__['portage_config.is_changed_uses'](pkg): # Package is up-to-date, but Gentoo USE flags are changing so # we need to add it to the targets targets[pkg] = cur[pkg] else: # Package either a) is not installed, or b) is installed and has an # upgrade available targets[pkg] = avail[pkg] if problems: return { 'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems) } if targets: # Find up-to-date packages if not pkgs: # There couldn't have been any up-to-date packages if this state # only targeted a single package and is being allowed to proceed to # the install step. 
up_to_date = [] else: up_to_date = [x for x in pkgs if x not in targets] if __opts__['test']: comments = [] comments.append( 'The following packages would be installed/upgraded: ' + ', '.join(sorted(targets)) ) if up_to_date: up_to_date_count = len(up_to_date) if up_to_date_count <= 10: comments.append( 'The following packages are already up-to-date: ' + ', '.join( ['{0} ({1})'.format(x, cur[x]) for x in sorted(up_to_date)] ) ) else: comments.append( '{0} packages are already up-to-date' .format(up_to_date_count) ) return {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comments)} if salt.utils.platform.is_windows(): # pkg.install execution module on windows ensures the software # package is installed when no version is specified, it does not # upgrade the software to the latest. This is per the design. # Build updated list of pkgs *with verion number*, exclude # non-targeted ones targeted_pkgs = [{x: targets[x]} for x in targets] else: # Build updated list of pkgs to exclude non-targeted ones targeted_pkgs = list(targets) # No need to refresh, if a refresh was necessary it would have been # performed above when pkg.latest_version was run. 
try: changes = __salt__['pkg.install'](name=None, refresh=False, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=targeted_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while installing ' 'package(s): {0}'.format(exc)} if changes: # Find failed and successful updates failed = [x for x in targets if not changes.get(x) or changes[x].get('new') != targets[x] and targets[x] != 'latest'] successful = [x for x in targets if x not in failed] comments = [] if failed: msg = 'The following packages failed to update: ' \ '{0}'.format(', '.join(sorted(failed))) comments.append(msg) if successful: msg = 'The following packages were successfully ' \ 'installed/upgraded: ' \ '{0}'.format(', '.join(sorted(successful))) comments.append(msg) if up_to_date: if len(up_to_date) <= 10: msg = 'The following packages were already up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: msg = '{0} packages were already up-to-date '.format( len(up_to_date)) comments.append(msg) return {'name': name, 'changes': changes, 'result': False if failed else True, 'comment': ' '.join(comments)} else: if len(targets) > 10: comment = ('{0} targeted packages failed to update. ' 'See debug log for details.'.format(len(targets))) elif len(targets) > 1: comment = ('The following targeted packages failed to update. ' 'See debug log for details: ({0}).' 
.format(', '.join(sorted(targets)))) else: comment = 'Package {0} failed to ' \ 'update.'.format(next(iter(list(targets.keys())))) if up_to_date: if len(up_to_date) <= 10: comment += ' The following packages were already ' \ 'up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: comment += '{0} packages were already ' \ 'up-to-date'.format(len(up_to_date)) return {'name': name, 'changes': changes, 'result': False, 'comment': comment} else: if len(desired_pkgs) > 10: comment = 'All {0} packages are up-to-date.'.format( len(desired_pkgs)) elif len(desired_pkgs) > 1: comment = 'All packages are up-to-date ' \ '({0}).'.format(', '.join(sorted(desired_pkgs))) else: comment = 'Package {0} is already ' \ 'up-to-date'.format(desired_pkgs[0]) return {'name': name, 'changes': {}, 'result': True, 'comment': comment} def _uninstall( action='remove', name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Common function for package removal ''' if action not in ('remove', 'purge'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalid action \'{0}\'. 
' 'This is probably a bug.'.format(action)} try: pkg_params = __salt__['pkg_resource.parse_targets']( name, pkgs, normalize=normalize)[0] except MinionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while parsing targets: ' '{0}'.format(exc)} targets = _find_remove_targets(name, version, pkgs, normalize, ignore_epoch=ignore_epoch, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, list): return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking targets: ' '{0}'.format(targets)} if action == 'purge': old_removed = __salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) targets.extend([x for x in pkg_params if x in old_removed]) targets.sort() if not targets: return {'name': name, 'changes': {}, 'result': True, 'comment': 'None of the targeted packages are installed' '{0}'.format(' or partially installed' if action == 'purge' else '')} if __opts__['test']: return {'name': name, 'changes': {}, 'result': None, 'comment': 'The following packages will be {0}d: ' '{1}.'.format(action, ', '.join(targets))} changes = __salt__['pkg.{0}'.format(action)](name, pkgs=pkgs, version=version, **kwargs) new = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) failed = [] for x in pkg_params: if __grains__['os_family'] in ['Suse', 'RedHat']: # Check if the package version set to be removed is actually removed: if x in new and not pkg_params[x]: failed.append(x) elif x in new and pkg_params[x] in new[x]: failed.append(x + "-" + pkg_params[x]) elif x in new: failed.append(x) if action == 'purge': new_removed = __salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) failed.extend([x for x in pkg_params if x in new_removed]) failed.sort() if failed: return {'name': name, 'changes': changes, 'result': False, 'comment': 'The following packages failed to {0}: ' '{1}.'.format(action, ', 
'.join(failed))} comments = [] not_installed = sorted([x for x in pkg_params if x not in targets]) if not_installed: comments.append('The following packages were not installed: ' '{0}'.format(', '.join(not_installed))) comments.append('The following packages were {0}d: ' '{1}.'.format(action, ', '.join(targets))) else: comments.append('All targeted packages were {0}d.'.format(action)) return {'name': name, 'changes': changes, 'result': True, 'comment': ' '.join(comments)} def removed(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.remove`` if necessary to remove the package. name The name of the package to be removed. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.removed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to remove. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='remove', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while removing ' 'package(s): {0}'.format(exc)) return ret def purged(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.purge`` if necessary to purge the package. 
All configuration files are also removed. name The name of the package to be purged. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. 
code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.purged: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to purge. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='purge', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while purging ' 'package(s): {0}'.format(exc)) return ret def uptodate(name, refresh=False, pkgs=None, **kwargs): ''' .. versionadded:: 2014.7.0 .. versionchanged:: 2018.3.0 Added support for the ``pkgin`` provider. Verify that the system is completely up to date. name The name has no functional value and is only used as a tracking reference refresh refresh the package database before checking for new upgrades pkgs list of packages to upgrade :param str cache_valid_time: This parameter sets the value in seconds after which cache marked as invalid, and cache update is necessary. This overwrite ``refresh`` parameter default behavior. In this case cache_valid_time is set, refresh will not take place for amount in seconds since last ``apt-get update`` executed on the system. .. 
note:: This parameter available only on Debian based distributions, and have no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 kwargs Any keyword arguments to pass through to ``pkg.upgrade``. .. versionadded:: 2015.5.0 ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'Failed to update'} if 'pkg.list_upgrades' not in __salt__: ret['comment'] = 'State pkg.uptodate is not available' return ret # emerge --update doesn't appear to support repo notation if 'fromrepo' in kwargs and __grains__['os_family'] == 'Gentoo': ret['comment'] = '\'fromrepo\' argument not supported on this platform' return ret if isinstance(refresh, bool): pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) try: packages = __salt__['pkg.list_upgrades'](refresh=refresh, **kwargs) expected = {pkgname: {'new': pkgver, 'old': __salt__['pkg.version'](pkgname, **kwargs)} for pkgname, pkgver in six.iteritems(packages)} if isinstance(pkgs, list): packages = [pkg for pkg in packages if pkg in pkgs] expected = {pkgname: pkgver for pkgname, pkgver in six.iteritems(expected) if pkgname in pkgs} except Exception as exc: ret['comment'] = six.text_type(exc) return ret else: ret['comment'] = 'refresh must be either True or False' return ret if not packages: ret['comment'] = 'System is already up-to-date' ret['result'] = True return ret elif __opts__['test']: ret['comment'] = 'System update will be performed' ret['changes'] = expected ret['result'] = None return ret try: ret['changes'] = __salt__['pkg.upgrade'](refresh=refresh, pkgs=pkgs, **kwargs) except CommandExecutionError as exc: if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while updating ' 'packages: {0}'.format(exc)) return ret # If a package list was provided, ensure those packages were updated missing = [] if isinstance(pkgs, list): missing = [pkg for pkg in six.iterkeys(expected) if pkg not in ret['changes']] if missing: ret['comment'] = 'The following package(s) failed to update: {0}'.format(', '.join(missing)) ret['result'] = False else: ret['comment'] = 'Upgrade ran successfully' ret['result'] = True return ret def group_installed(name, skip=None, include=None, **kwargs): ''' .. versionadded:: 2015.8.0 .. versionchanged:: 2016.11.0 Added support in :mod:`pacman <salt.modules.pacman>` Ensure that an entire package group is installed. This state is currently only supported for the :mod:`yum <salt.modules.yumpkg>` and :mod:`pacman <salt.modules.pacman>` package managers. skip Packages that would normally be installed by the package group ("default" packages), which should not be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - skip: - piranha include Packages which are included in a group, which would not normally be installed by a ``yum groupinstall`` ("optional" packages). Note that this will not enforce group membership; if you include packages which are not members of the specified groups, they will still be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - include: - haproxy .. versionchanged:: 2016.3.0 This option can no longer be passed as a comma-separated list, it must now be passed as a list (as shown in the above example). .. note:: Because this is essentially a wrapper around :py:func:`pkg.install <salt.modules.yumpkg.install>`, any argument which can be passed to pkg.install may also be included here, and it will be passed on to the call to :py:func:`pkg.install <salt.modules.yumpkg.install>`. 
''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} if 'pkg.group_diff' not in __salt__: ret['comment'] = 'pkg.group_install not available for this platform' return ret if skip is None: skip = [] else: if not isinstance(skip, list): ret['comment'] = 'skip must be formatted as a list' return ret for idx, item in enumerate(skip): if not isinstance(item, six.string_types): skip[idx] = six.text_type(item) if include is None: include = [] else: if not isinstance(include, list): ret['comment'] = 'include must be formatted as a list' return ret for idx, item in enumerate(include): if not isinstance(item, six.string_types): include[idx] = six.text_type(item) try: diff = __salt__['pkg.group_diff'](name) except CommandExecutionError as err: ret['comment'] = ('An error was encountered while installing/updating ' 'group \'{0}\': {1}.'.format(name, err)) return ret mandatory = diff['mandatory']['installed'] + \ diff['mandatory']['not installed'] invalid_skip = [x for x in mandatory if x in skip] if invalid_skip: ret['comment'] = ( 'The following mandatory packages cannot be skipped: {0}' .format(', '.join(invalid_skip)) ) return ret targets = diff['mandatory']['not installed'] targets.extend([x for x in diff['default']['not installed'] if x not in skip]) targets.extend(include) if not targets: ret['result'] = True ret['comment'] = 'Group \'{0}\' is already installed'.format(name) return ret partially_installed = diff['mandatory']['installed'] \ or diff['default']['installed'] \ or diff['optional']['installed'] if __opts__['test']: ret['result'] = None if partially_installed: ret['comment'] = ( 'Group \'{0}\' is partially installed and will be updated' .format(name) ) else: ret['comment'] = 'Group \'{0}\' will be installed'.format(name) return ret try: ret['changes'] = __salt__['pkg.install'](pkgs=targets, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while ' 'installing/updating group \'{0}\': {1}' .format(name, exc)) return ret failed = [x for x in targets if x not in __salt__['pkg.list_pkgs'](**kwargs)] if failed: ret['comment'] = ( 'Failed to install the following packages: {0}' .format(', '.join(failed)) ) return ret ret['result'] = True ret['comment'] = 'Group \'{0}\' was {1}'.format( name, 'updated' if partially_installed else 'installed' ) return ret def mod_init(low): ''' Set a flag to tell the install functions to refresh the package database. This ensures that the package database is refreshed only once during a state run significantly improving the speed of package management during a state run. It sets a flag for a number of reasons, primarily due to timeline logic. When originally setting up the mod_init for pkg a number of corner cases arose with different package managers and how they refresh package data. It also runs the "ex_mod_init" from the package manager module that is currently loaded. The "ex_mod_init" is expected to work as a normal "mod_init" function. .. 
seealso:: :py:func:`salt.modules.ebuild.ex_mod_init` ''' ret = True if 'pkg.ex_mod_init' in __salt__: ret = __salt__['pkg.ex_mod_init'](low) if low['fun'] == 'installed' or low['fun'] == 'latest': salt.utils.pkg.write_rtag(__opts__) return ret return False def mod_aggregate(low, chunks, running): ''' The mod_aggregate function which looks up all packages in the available low chunks and merges them into a single pkgs ref in the present low data ''' pkgs = [] pkg_type = None agg_enabled = [ 'installed', 'latest', 'removed', 'purged', ] if low.get('fun') not in agg_enabled: return low for chunk in chunks: tag = __utils__['state.gen_tag'](chunk) if tag in running: # Already ran the pkg state, skip aggregation continue if chunk.get('state') == 'pkg': if '__agg__' in chunk: continue # Check for the same function if chunk.get('fun') != low.get('fun'): continue # Check for the same repo if chunk.get('fromrepo') != low.get('fromrepo'): continue # Check first if 'sources' was passed so we don't aggregate pkgs # and sources together. if 'sources' in chunk: if pkg_type is None: pkg_type = 'sources' if pkg_type == 'sources': pkgs.extend(chunk['sources']) chunk['__agg__'] = True else: if pkg_type is None: pkg_type = 'pkgs' if pkg_type == 'pkgs': # Pull out the pkg names! if 'pkgs' in chunk: pkgs.extend(chunk['pkgs']) chunk['__agg__'] = True elif 'name' in chunk: version = chunk.pop('version', None) if version is not None: pkgs.append({chunk['name']: version}) else: pkgs.append(chunk['name']) chunk['__agg__'] = True if pkg_type is not None and pkgs: if pkg_type in low: low[pkg_type].extend(pkgs) else: low[pkg_type] = pkgs return low def mod_watch(name, **kwargs): ''' Install/reinstall a package based on a watch requisite .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered. 
''' sfun = kwargs.pop('sfun', None) mapfun = {'purged': purged, 'latest': latest, 'removed': removed, 'installed': installed} if sfun in mapfun: return mapfun[sfun](name, **kwargs) return {'name': name, 'changes': {}, 'comment': 'pkg.{0} does not work with the watch requisite'.format(sfun), 'result': False}
saltstack/salt
salt/states/pkg.py
_find_unpurge_targets
python
def _find_unpurge_targets(desired, **kwargs): ''' Find packages which are marked to be purged but can't yet be removed because they are dependencies for other installed packages. These are the packages which will need to be 'unpurged' because they are part of pkg.installed states. This really just applies to Debian-based Linuxes. ''' return [ x for x in desired if x in __salt__['pkg.list_pkgs'](purge_desired=True, **kwargs) ]
Find packages which are marked to be purged but can't yet be removed because they are dependencies for other installed packages. These are the packages which will need to be 'unpurged' because they are part of pkg.installed states. This really just applies to Debian-based Linuxes.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pkg.py#L240-L250
null
# -*- coding: utf-8 -*- ''' Installation of packages using OS package managers such as yum or apt-get ========================================================================= .. note:: On minions running systemd>=205, as of version 2015.8.12, 2016.3.3, and 2016.11.0, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing the package manager commands spawned by Salt, when Salt updates itself (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.use_scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Salt can manage software packages via the pkg state module, packages can be set up to be installed, latest, removed and purged. Package management declarations are typically rather simple: .. code-block:: yaml vim: pkg.installed A more involved example involves pulling from a custom repository. .. code-block:: yaml base: pkgrepo.managed: - humanname: Logstash PPA - name: ppa:wolfnet/logstash - dist: precise - file: /etc/apt/sources.list.d/logstash.list - keyid: 28B04E4A - keyserver: keyserver.ubuntu.com logstash: pkg.installed: - fromrepo: ppa:wolfnet/logstash Multiple packages can also be installed with the use of the pkgs state module .. code-block:: yaml dotdeb.repo: pkgrepo.managed: - humanname: Dotdeb - name: deb http://packages.dotdeb.org wheezy-php55 all - dist: wheezy-php55 - file: /etc/apt/sources.list.d/dotbeb.list - keyid: 89DF5277 - keyserver: keys.gnupg.net - refresh_db: true php.packages: pkg.installed: - fromrepo: wheezy-php55 - pkgs: - php5-fpm - php5-cli - php5-curl .. 
warning:: Package names are currently case-sensitive. If the minion is using a package manager which is not case-sensitive (such as :mod:`pkgng <salt.modules.pkgng>`), then this state will fail if the proper case is not used. This will be addressed in a future release of Salt. ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import fnmatch import logging import os import re # Import Salt libs import salt.utils.pkg import salt.utils.platform import salt.utils.versions from salt.output import nested from salt.utils.functools import namespaced_function as _namespaced_function from salt.utils.odict import OrderedDict as _OrderedDict from salt.exceptions import ( CommandExecutionError, MinionError, SaltInvocationError ) from salt.modules.pkg_resource import _repack_pkgs # Import 3rd-party libs from salt.ext import six # pylint: disable=invalid-name _repack_pkgs = _namespaced_function(_repack_pkgs, globals()) if salt.utils.platform.is_windows(): # pylint: disable=import-error,no-name-in-module,unused-import from salt.ext.six.moves.urllib.parse import urlparse as _urlparse from salt.exceptions import SaltRenderError import collections import datetime import errno import time from functools import cmp_to_key # pylint: disable=import-error # pylint: enable=unused-import from salt.modules.win_pkg import _get_package_info from salt.modules.win_pkg import get_repo_data from salt.modules.win_pkg import _get_repo_details from salt.modules.win_pkg import _refresh_db_conditional from salt.modules.win_pkg import refresh_db from salt.modules.win_pkg import genrepo from salt.modules.win_pkg import _repo_process_pkg_sls from salt.modules.win_pkg import _get_latest_pkg_version from salt.modules.win_pkg import _reverse_cmp_pkg_versions _get_package_info = _namespaced_function(_get_package_info, globals()) get_repo_data = _namespaced_function(get_repo_data, globals()) _get_repo_details = \ _namespaced_function(_get_repo_details, globals()) 
_refresh_db_conditional = \ _namespaced_function(_refresh_db_conditional, globals()) refresh_db = _namespaced_function(refresh_db, globals()) genrepo = _namespaced_function(genrepo, globals()) _repo_process_pkg_sls = \ _namespaced_function(_repo_process_pkg_sls, globals()) _get_latest_pkg_version = \ _namespaced_function(_get_latest_pkg_version, globals()) _reverse_cmp_pkg_versions = \ _namespaced_function(_reverse_cmp_pkg_versions, globals()) # The following imports are used by the namespaced win_pkg funcs # and need to be included in their globals. # pylint: disable=import-error,unused-import from salt.utils.versions import LooseVersion # pylint: enable=import-error,unused-import # pylint: enable=invalid-name log = logging.getLogger(__name__) def __virtual__(): ''' Only make these states available if a pkg provider has been detected or assigned for this minion ''' return 'pkg.install' in __salt__ def _get_comparison_spec(pkgver): ''' Return a tuple containing the comparison operator and the version. If no comparison operator was passed, the comparison is assumed to be an "equals" comparison, and "==" will be the operator returned. ''' oper, verstr = salt.utils.pkg.split_comparison(pkgver.strip()) if oper in ('=', ''): oper = '==' return oper, verstr def _parse_version_string(version_conditions_string): ''' Returns a list of two-tuples containing (operator, version). ''' result = [] version_conditions_string = version_conditions_string.strip() if not version_conditions_string: return result for version_condition in version_conditions_string.split(','): operator_and_version = _get_comparison_spec(version_condition) result.append(operator_and_version) return result def _fulfills_version_string(installed_versions, version_conditions_string, ignore_epoch=False, allow_updates=False): ''' Returns True if any of the installed versions match the specified version conditions, otherwise returns False. 
installed_versions The installed versions version_conditions_string The string containing all version conditions. E.G. 1.2.3-4 >=1.2.3-4 >=1.2.3-4, <2.3.4-5 >=1.2.3-4, <2.3.4-5, !=1.2.4-1 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. allow_updates : False Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. (Only applicable if only one strict version condition is specified E.G. version: 2.0.6~ubuntu3) ''' version_conditions = _parse_version_string(version_conditions_string) for installed_version in installed_versions: fullfills_all = True for operator, version_string in version_conditions: if allow_updates and len(version_conditions) == 1 and operator == '==': operator = '>=' fullfills_all = fullfills_all and _fulfills_version_spec([installed_version], operator, version_string, ignore_epoch=ignore_epoch) if fullfills_all: return True return False def _fulfills_version_spec(versions, oper, desired_version, ignore_epoch=False): ''' Returns True if any of the installed versions match the specified version, otherwise returns False ''' cmp_func = __salt__.get('pkg.version_cmp') # stripping "with_origin" dict wrapper if salt.utils.platform.is_freebsd(): if isinstance(versions, dict) and 'version' in versions: versions = versions['version'] for ver in versions: if (oper == '==' and fnmatch.fnmatch(ver, desired_version)) \ or salt.utils.versions.compare(ver1=ver, oper=oper, ver2=desired_version, cmp_func=cmp_func, ignore_epoch=ignore_epoch): return True return False def _find_download_targets(name=None, version=None, pkgs=None, normalize=True, skip_suggestions=False, ignore_epoch=False, **kwargs): ''' Inspect the 
arguments to pkg.downloaded and discover what packages need to be downloaded. Return a dict of packages to download. ''' cur_pkgs = __salt__['pkg.list_downloaded'](**kwargs) if pkgs: to_download = _repack_pkgs(pkgs, normalize=normalize) if not to_download: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. See ' 'minion log.'} else: if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_download = {_normalize_name(name): version} else: to_download = {name: version} cver = cur_pkgs.get(name, {}) if name in to_download: # Package already downloaded, no need to download again if cver and version in cver: return {'name': name, 'changes': {}, 'result': True, 'comment': 'Version {0} of package \'{1}\' is already ' 'downloaded'.format(version, name)} # if cver is not an empty string, the package is already downloaded elif cver and version is None: # The package is downloaded return {'name': name, 'changes': {}, 'result': True, 'comment': 'Package {0} is already ' 'downloaded'.format(name)} version_spec = False if not skip_suggestions: try: problems = _preflight_check(to_download, **kwargs) except CommandExecutionError: pass else: comments = [] if problems.get('no_suggest'): comments.append( 'The following package(s) were not found, and no ' 'possible matches were found in the package db: ' '{0}'.format( ', '.join(sorted(problems['no_suggest'])) ) ) if problems.get('suggest'): for pkgname, suggestions in \ six.iteritems(problems['suggest']): comments.append( 'Package \'{0}\' not found (possible matches: ' '{1})'.format(pkgname, ', '.join(suggestions)) ) if comments: if len(comments) > 1: comments.append('') return {'name': name, 'changes': {}, 'result': False, 'comment': '. 
'.join(comments).rstrip()} # Find out which packages will be targeted in the call to pkg.download # Check current downloaded versions against specified versions targets = {} problems = [] for pkgname, pkgver in six.iteritems(to_download): cver = cur_pkgs.get(pkgname, {}) # Package not yet downloaded, so add to targets if not cver: targets[pkgname] = pkgver continue # No version specified but package is already downloaded elif cver and not pkgver: continue version_spec = True try: if not _fulfills_version_string(cver.keys(), pkgver, ignore_epoch=ignore_epoch): targets[pkgname] = pkgver except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already downloaded msg = ( 'All specified packages{0} are already downloaded' .format(' (matching specified versions)' if version_spec else '') ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_advisory_targets(name=None, advisory_ids=None, **kwargs): ''' Inspect the arguments to pkg.patch_installed and discover what advisory patches need to be installed. Return a dict of advisory patches to install. 
''' cur_patches = __salt__['pkg.list_installed_patches'](**kwargs) if advisory_ids: to_download = advisory_ids else: to_download = [name] if cur_patches.get(name, {}): # Advisory patch already installed, no need to install it again return {'name': name, 'changes': {}, 'result': True, 'comment': 'Advisory patch {0} is already ' 'installed'.format(name)} # Find out which advisory patches will be targeted in the call to pkg.install targets = [] for patch_name in to_download: cver = cur_patches.get(patch_name, {}) # Advisory patch not yet installed, so add to targets if not cver: targets.append(patch_name) continue if not targets: # All specified packages are already downloaded msg = ('All specified advisory patches are already installed') return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_remove_targets(name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.removed and discover what packages need to be removed. Return a dict of packages to remove. ''' if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if pkgs: to_remove = _repack_pkgs(pkgs, normalize=normalize) if not to_remove: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. 
See ' 'minion log.'} else: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_remove = {_normalize_name(name): version} version_spec = False # Find out which packages will be targeted in the call to pkg.remove # Check current versions against specified versions targets = [] problems = [] for pkgname, pkgver in six.iteritems(to_remove): # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names origin = bool(re.search('/', pkgname)) if __grains__['os'] == 'FreeBSD' and origin: cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == pkgname] else: cver = cur_pkgs.get(pkgname, []) # Package not installed, no need to remove if not cver: continue # No version specified and pkg is installed elif __salt__['pkg_resource.version_clean'](pkgver) is None: targets.append(pkgname) continue version_spec = True try: if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch): targets.append(pkgname) else: log.debug( 'Current version (%s) did not match desired version ' 'specification (%s), will not remove', cver, pkgver ) except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already absent msg = 'All specified packages{0} are already absent'.format( ' (matching specified versions)' if version_spec else '' ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_install_targets(name=None, version=None, pkgs=None, sources=None, skip_suggestions=False, pkg_verify=False, normalize=True, ignore_epoch=False, reinstall=False, refresh=False, **kwargs): ''' Inspect the arguments to pkg.installed and discover what packages need to be installed. 
Return a dict of desired packages ''' was_refreshed = False if all((pkgs, sources)): return {'name': name, 'changes': {}, 'result': False, 'comment': 'Only one of "pkgs" and "sources" is permitted.'} # dict for packages that fail pkg.verify and their altered files altered_files = {} # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True if salt.utils.platform.is_windows(): # Windows requires a refresh to establish a pkg db if refresh=True, so # add it to the kwargs. 
kwargs['refresh'] = refresh resolve_capabilities = kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__ try: cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) cur_prov = resolve_capabilities and __salt__['pkg.list_provides'](**kwargs) or dict() except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': exc.strerror} if salt.utils.platform.is_windows() and kwargs.pop('refresh', False): # We already refreshed when we called pkg.list_pkgs was_refreshed = True refresh = False if any((pkgs, sources)): if pkgs: desired = _repack_pkgs(pkgs, normalize=normalize) elif sources: desired = __salt__['pkg_resource.pack_sources']( sources, normalize=normalize, ) if not desired: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted \'{0}\' parameter. See ' 'minion log.'.format('pkgs' if pkgs else 'sources')} to_unpurge = _find_unpurge_targets(desired, **kwargs) else: if salt.utils.platform.is_windows(): pkginfo = _get_package_info(name, saltenv=kwargs['saltenv']) if not pkginfo: return {'name': name, 'changes': {}, 'result': False, 'comment': 'Package {0} not found in the ' 'repository.'.format(name)} if version is None: version = _get_latest_pkg_version(pkginfo) if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) desired = {_normalize_name(name): version} else: desired = {name: version} to_unpurge = _find_unpurge_targets(desired, **kwargs) # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names origin = bool(re.search('/', name)) if __grains__['os'] == 'FreeBSD' and origin: cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == name] else: cver = cur_pkgs.get(name, []) if name not in to_unpurge: if version and version in cver \ and not reinstall \ and not pkg_verify: # The package is installed and is the correct version return {'name': name, 'changes': {}, 'result': True, 
'comment': 'Version {0} of package \'{1}\' is already ' 'installed'.format(version, name)} # if cver is not an empty string, the package is already installed elif cver and version is None \ and not reinstall \ and not pkg_verify: # The package is installed return {'name': name, 'changes': {}, 'result': True, 'comment': 'Package {0} is already ' 'installed'.format(name)} version_spec = False if not sources: # Check for alternate package names if strict processing is not # enforced. Takes extra time. Disable for improved performance if not skip_suggestions: # Perform platform-specific pre-flight checks not_installed = dict([ (name, version) for name, version in desired.items() if not (name in cur_pkgs and (version is None or _fulfills_version_string(cur_pkgs[name], version))) ]) if not_installed: try: problems = _preflight_check(not_installed, **kwargs) except CommandExecutionError: pass else: comments = [] if problems.get('no_suggest'): comments.append( 'The following package(s) were not found, and no ' 'possible matches were found in the package db: ' '{0}'.format( ', '.join(sorted(problems['no_suggest'])) ) ) if problems.get('suggest'): for pkgname, suggestions in \ six.iteritems(problems['suggest']): comments.append( 'Package \'{0}\' not found (possible matches: ' '{1})'.format(pkgname, ', '.join(suggestions)) ) if comments: if len(comments) > 1: comments.append('') return {'name': name, 'changes': {}, 'result': False, 'comment': '. '.join(comments).rstrip()} # Resolve the latest package version for any packages with "latest" in the # package version wants_latest = [] \ if sources \ else [x for x, y in six.iteritems(desired) if y == 'latest'] if wants_latest: resolved_latest = __salt__['pkg.latest_version'](*wants_latest, refresh=refresh, **kwargs) if len(wants_latest) == 1: resolved_latest = {wants_latest[0]: resolved_latest} if refresh: was_refreshed = True refresh = False # pkg.latest_version returns an empty string when the package is # up-to-date. 
So check the currently-installed packages. If found, the # resolved latest version will be the currently installed one from # cur_pkgs. If not found, then the package doesn't exist and the # resolved latest version will be None. for key in resolved_latest: if not resolved_latest[key]: if key in cur_pkgs: resolved_latest[key] = cur_pkgs[key][-1] else: resolved_latest[key] = None # Update the desired versions with the ones we resolved desired.update(resolved_latest) # Find out which packages will be targeted in the call to pkg.install targets = {} to_reinstall = {} problems = [] warnings = [] failed_verify = False for package_name, version_string in six.iteritems(desired): cver = cur_pkgs.get(package_name, []) if resolve_capabilities and not cver and package_name in cur_prov: cver = cur_pkgs.get(cur_prov.get(package_name)[0], []) # Package not yet installed, so add to targets if not cver: targets[package_name] = version_string continue if sources: if reinstall: to_reinstall[package_name] = version_string continue elif 'lowpkg.bin_pkg_info' not in __salt__: continue # Metadata parser is available, cache the file and derive the # package's name and version err = 'Unable to cache {0}: {1}' try: cached_path = __salt__['cp.cache_file'](version_string, saltenv=kwargs['saltenv']) except CommandExecutionError as exc: problems.append(err.format(version_string, exc)) continue if not cached_path: problems.append(err.format(version_string, 'file not found')) continue elif not os.path.exists(cached_path): problems.append('{0} does not exist on minion'.format(version_string)) continue source_info = __salt__['lowpkg.bin_pkg_info'](cached_path) if source_info is None: warnings.append('Failed to parse metadata for {0}'.format(version_string)) continue else: verstr = source_info['version'] else: verstr = version_string if reinstall: to_reinstall[package_name] = version_string continue if not __salt__['pkg_resource.check_extra_requirements'](package_name, version_string): 
targets[package_name] = version_string continue # No version specified and pkg is installed elif __salt__['pkg_resource.version_clean'](version_string) is None: if (not reinstall) and pkg_verify: try: verify_result = __salt__['pkg.verify']( package_name, ignore_types=ignore_types, verify_options=verify_options, **kwargs ) except (CommandExecutionError, SaltInvocationError) as exc: failed_verify = exc.strerror continue if verify_result: to_reinstall[package_name] = version_string altered_files[package_name] = verify_result continue version_fulfilled = False allow_updates = bool(not sources and kwargs.get('allow_updates')) try: version_fulfilled = _fulfills_version_string(cver, verstr, ignore_epoch=ignore_epoch, allow_updates=allow_updates) except CommandExecutionError as exc: problems.append(exc.strerror) continue # Compare desired version against installed version. version_spec = True if not version_fulfilled: if reinstall: to_reinstall[package_name] = version_string else: version_conditions = _parse_version_string(version_string) if pkg_verify and any(oper == '==' for oper, version in version_conditions): try: verify_result = __salt__['pkg.verify']( package_name, ignore_types=ignore_types, verify_options=verify_options, **kwargs ) except (CommandExecutionError, SaltInvocationError) as exc: failed_verify = exc.strerror continue if verify_result: to_reinstall[package_name] = version_string altered_files[package_name] = verify_result else: log.debug( 'Current version (%s) did not match desired version ' 'specification (%s), adding to installation targets', cver, version_string ) targets[package_name] = version_string if failed_verify: problems.append(failed_verify) if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not any((targets, to_unpurge, to_reinstall)): # All specified packages are installed msg = 'All specified packages are already installed{0}' msg = msg.format( ' and are at the desired version' if 
version_spec and not sources else '' ) ret = {'name': name, 'changes': {}, 'result': True, 'comment': msg} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret return (desired, targets, to_unpurge, to_reinstall, altered_files, warnings, was_refreshed) def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None): ''' Determine whether or not the installed packages match what was requested in the SLS file. ''' ok = [] failed = [] if not new_caps: new_caps = dict() for pkgname, pkgver in desired.items(): # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names. # Homebrew for Mac OSX does something similar with tap names # prefixing package names, separated with a slash. has_origin = '/' in pkgname if __grains__['os'] == 'FreeBSD' and has_origin: cver = [k for k, v in six.iteritems(new_pkgs) if v['origin'] == pkgname] elif __grains__['os'] == 'MacOS' and has_origin: cver = new_pkgs.get(pkgname, new_pkgs.get(pkgname.split('/')[-1])) elif __grains__['os'] == 'OpenBSD': cver = new_pkgs.get(pkgname.split('%')[0]) elif __grains__['os_family'] == 'Debian': cver = new_pkgs.get(pkgname.split('=')[0]) else: cver = new_pkgs.get(pkgname) if not cver and pkgname in new_caps: cver = new_pkgs.get(new_caps.get(pkgname)[0]) if not cver: failed.append(pkgname) continue elif pkgver == 'latest': ok.append(pkgname) continue elif not __salt__['pkg_resource.version_clean'](pkgver): ok.append(pkgname) continue elif pkgver.endswith("*") and cver[0].startswith(pkgver[:-1]): ok.append(pkgname) continue if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch): ok.append(pkgname) else: failed.append(pkgname) return ok, failed def _get_desired_pkg(name, desired): ''' Helper function that retrieves and nicely formats the desired pkg (and version if specified) so that helpful information can be printed in the comment for the state. 
''' if not desired[name] or desired[name].startswith(('<', '>', '=')): oper = '' else: oper = '=' return '{0}{1}{2}'.format(name, oper, '' if not desired[name] else desired[name]) def _preflight_check(desired, fromrepo, **kwargs): ''' Perform platform-specific checks on desired packages ''' if 'pkg.check_db' not in __salt__: return {} ret = {'suggest': {}, 'no_suggest': []} pkginfo = __salt__['pkg.check_db']( *list(desired.keys()), fromrepo=fromrepo, **kwargs ) for pkgname in pkginfo: if pkginfo[pkgname]['found'] is False: if pkginfo[pkgname]['suggestions']: ret['suggest'][pkgname] = pkginfo[pkgname]['suggestions'] else: ret['no_suggest'].append(pkgname) return ret def _nested_output(obj): ''' Serialize obj and format for output ''' nested.__opts__ = __opts__ ret = nested.output(obj).rstrip() return ret def _resolve_capabilities(pkgs, refresh=False, **kwargs): ''' Resolve capabilities in ``pkgs`` and exchange them with real package names, when the result is distinct. This feature can be turned on while setting the paramter ``resolve_capabilities`` to True. Return the input dictionary with replaced capability names and as second return value a bool which say if a refresh need to be run. In case of ``resolve_capabilities`` is False (disabled) or not supported by the implementation the input is returned unchanged. ''' if not pkgs or 'pkg.resolve_capabilities' not in __salt__: return pkgs, refresh ret = __salt__['pkg.resolve_capabilities'](pkgs, refresh=refresh, **kwargs) return ret, False def installed( name, version=None, refresh=None, fromrepo=None, skip_verify=False, skip_suggestions=False, pkgs=None, sources=None, allow_updates=False, pkg_verify=False, normalize=True, ignore_epoch=False, reinstall=False, update_holds=False, bypass_file=None, bypass_file_contains=None, **kwargs): ''' Ensure that the package is installed, and that it is the correct version (if specified). :param str name: The name of the package to be installed. 
This parameter is ignored if either "pkgs" or "sources" is used. Additionally, please note that this option can only be used to install packages from a software repository. To install a package file manually, use the "sources" option detailed below. :param str version: Install a specific version of a package. This option is ignored if "sources" is used. Currently, this option is supported for the following pkg providers: :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`pkgin <salt.modules.pkgin>`, :mod:`win_pkg <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`. The version number includes the release designation where applicable, to allow Salt to target a specific release of a given version. When in doubt, using the ``pkg.latest_version`` function for an uninstalled package will tell you the version available. .. code-block:: bash # salt myminion pkg.latest_version vim-enhanced myminion: 2:7.4.160-1.el7 .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the ``pkg.latest_version`` output above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. 
Also, while this function is not yet implemented for all pkg frontends, :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` will show all versions available in the various repositories for a given package, irrespective of whether or not it is installed. .. code-block:: bash # salt myminion pkg.list_repo_pkgs bash myminion: ---------- bash: - 4.2.46-21.el7_3 - 4.2.46-20.el7_2 This function was first added for :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` in 2014.1.0, and was expanded to :py:func:`Debian/Ubuntu <salt.modules.aptpkg.list_repo_pkgs>` and :py:func:`Arch Linux <salt.modules.pacman.list_repo_pkgs>`-based distros in the 2017.7.0 release. The version strings returned by either of these functions can be used as version specifiers in pkg states. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.installed: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 If the version given is the string ``latest``, the latest available package version will be installed à la ``pkg.latest``. **WILDCARD VERSIONS** As of the 2017.7.0 release, this state now supports wildcards in package versions for SUSE SLES/Leap/Tumbleweed, Debian/Ubuntu, RHEL/CentOS, Arch Linux, and their derivatives. Using wildcards can be useful for packages where the release name is built into the version in some way, such as for RHEL/CentOS which typically has version numbers like ``1.2.34-5.el7``. An example of the usage for this would be: .. code-block:: yaml mypkg: pkg.installed: - version: '1.2.34*' Keep in mind that using wildcard versions will result in a slower state run since Salt must gather the available versions of the specified packages and figure out which of them match the specified wildcard expression. :param bool refresh: This parameter controls whether or not the package repo database is updated prior to installing the requested package(s). 
If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before installing. If ``False``, the package database will *not* be refreshed before installing. If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - cache_valid_time: 300 - allow_updates: True - hold: False In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param str fromrepo: Specify a repository from which to install .. note:: Distros which use APT (Debian, Ubuntu, etc.) do not have a concept of repositories, in the same way as YUM-based distros do. When a source is added, it is assigned to a given release. Consider the following source configuration: .. code-block:: text deb http://ppa.launchpad.net/saltstack/salt/ubuntu precise main The packages provided by this source would be made available via the ``precise`` release, therefore ``fromrepo`` would need to be set to ``precise`` for Salt to install the package from this source. 
Having multiple sources in the same release may result in the default install candidate being newer than what is desired. If this is the case, the desired version must be specified using the ``version`` parameter. If the ``pkgs`` parameter is being used to install multiple packages in the same state, then instead of using ``version``, use the method of version specification described in the **Multiple Package Installation Options** section below. Running the shell command ``apt-cache policy pkgname`` on a minion can help elucidate the APT configuration and aid in properly configuring states: .. code-block:: bash root@saltmaster:~# salt ubuntu01 cmd.run 'apt-cache policy ffmpeg' ubuntu01: ffmpeg: Installed: (none) Candidate: 7:0.10.11-1~precise1 Version table: 7:0.10.11-1~precise1 0 500 http://ppa.launchpad.net/jon-severinsson/ffmpeg/ubuntu/ precise/main amd64 Packages 4:0.8.10-0ubuntu0.12.04.1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise-updates/main amd64 Packages 500 http://security.ubuntu.com/ubuntu/ precise-security/main amd64 Packages 4:0.8.1-0ubuntu1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise/main amd64 Packages The release is located directly after the source's URL. The actual release name is the part before the slash, so to install version **4:0.8.10-0ubuntu0.12.04.1** either ``precise-updates`` or ``precise-security`` could be used for the ``fromrepo`` value. :param bool skip_verify: Skip the GPG verification check for the package to be installed :param bool skip_suggestions: Force strict package naming. Disables lookup of package alternatives. .. versionadded:: 2014.1.1 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 :param bool allow_updates: Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). 
This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - allow_updates: True - hold: False :param bool pkg_verify: .. versionadded:: 2014.7.0 For requested packages that are already installed and would not be targeted for upgrade or downgrade, use pkg.verify to determine if any of the files installed by the package have been altered. If files have been altered, the reinstall option of pkg.install is used to force a reinstall. Types to ignore can be passed to pkg.verify. Additionally, ``verify_options`` can be used to modify further the behavior of pkg.verify. See examples below. Currently, this option is supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>`. Examples: .. code-block:: yaml httpd: pkg.installed: - version: 2.2.15-30.el6.centos - pkg_verify: True .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc - verify_options: - nodeps - nofiledigest :param list ignore_types: List of types to ignore when verifying the package .. versionadded:: 2014.7.0 :param list verify_options: List of additional options to pass when verifying the package. These options will be added to the ``rpm -V`` command, prepended with ``--`` (for example, when ``nodeps`` is passed in this option, ``rpm -V`` will be run with ``--nodeps``). .. versionadded:: 2016.11.0 :param bool normalize: Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml gpfs.gplbin-2.6.32-279.31.1.el6.x86_64: pkg.installed: - normalize: False :param bool ignore_epoch: When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.installed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the package would be installed, but the state would report as failed because the actual installed version would be ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 | **MULTIPLE PACKAGE INSTALLATION OPTIONS: (not supported in pkgng)** :param list pkgs: A list of packages to install from a software repository. All packages listed under ``pkgs`` will be installed via a single command. .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar - baz - hold: True ``NOTE:`` For :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`winrepo <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`, version numbers can be specified in the ``pkgs`` argument. For example: .. 
code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz Additionally, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`zypper <salt.modules.zypper>`, :mod:`yum/dnf <salt.modules.yumpkg>`, and :mod:`apt <salt.modules.aptpkg>` support the ``<``, ``<=``, ``>=``, and ``>`` operators for more control over what versions will be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: '>=1.2.3-4' - baz ``NOTE:`` When using comparison operators, the expression must be enclosed in quotes to avoid a YAML render error. With :mod:`ebuild <salt.modules.ebuild>` is also possible to specify a use flag list and/or if the given packages should be in package.accept_keywords file and/or the overlay from which you want the package to be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo: '~' - bar: '~>=1.2:slot::overlay[use,-otheruse]' - baz :param list sources: A list of packages to install, along with the source URI or local path from which to install each package. In the example below, ``foo``, ``bar``, ``baz``, etc. refer to the name of the package, as it would appear in the output of the ``pkg.version`` or ``pkg.list_pkgs`` salt CLI commands. .. code-block:: yaml mypkgs: pkg.installed: - sources: - foo: salt://rpms/foo.rpm - bar: http://somesite.org/bar.rpm - baz: ftp://someothersite.org/baz.rpm - qux: /minion/path/to/qux.rpm **PLATFORM-SPECIFIC ARGUMENTS** These are specific to each OS. If it does not apply to the execution module for your OS, it is ignored. :param bool hold: Force the package to be held at the current installed version. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2014.7.0 :param bool update_holds: If ``True``, and this function would update the package version, any packages which are being held will be temporarily unheld so that they can be updated. 
Otherwise, if this function attempts to update a held package, the held package(s) will be skipped and the state will fail. By default, this parameter is set to ``False``. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2016.11.0 :param list names: A list of packages to install from a software repository. Each package will be installed individually by the package manager. .. warning:: Unlike ``pkgs``, the ``names`` parameter cannot specify a version. In addition, it makes a separate call to the package management frontend to install each package, whereas ``pkgs`` makes just a single call. It is therefore recommended to use ``pkgs`` instead of ``names`` to install multiple packages, both for the additional features and the performance improvement that it brings. :param bool install_recommends: Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - install_recommends: False :param bool only_upgrade: Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. :param bool report_reboot_exit_codes: If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. 
        code-block:: yaml

            ms vcpp installed:
              pkg.installed:
                - name: ms-vcpp
                - version: 10.0.40219
                - report_reboot_exit_codes: False

    :param str bypass_file:
        If you wish to bypass the full package validation process, you can
        specify a file related to the installed package as a way to validate
        the package has already been installed. A good example would be a
        config file that is deployed with the package. Another bypass_file
        could be ``/run/salt-minion.pid``.

        .. code-block:: yaml

            install_ntp:
              pkg.installed:
                - name: ntp
                - bypass_file: /etc/ntp.conf

        The use case for this feature is when running salt at significant
        scale. Each state that has a requisite for a ``pkg.installed`` will
        have salt querying the package manager of the system. Compared to
        simple diff checks, querying the package manager is a lengthy process.
        This feature is an attempt to reduce the run time of states. If only a
        config change is being made but you wish to keep all of the self
        resolving requisites this bypasses the lengthy cost of the package
        manager. The assumption is that if this file is present, the package
        should already be installed.

    :param str bypass_file_contains:
        This option can only be used in conjunction with the ``bypass_file``
        option. It is to provide a second layer of validation before bypassing
        the ``pkg.installed`` process.

        .. code-block:: yaml

            install_ntp:
              pkg.installed:
                - name: ntp
                - bypass_file: /etc/ntp.conf
                - bypass_file_contains: version-20181218

        This will have salt check to see if the file contains the specified
        string. If the value is found, the ``pkg.installed`` process will be
        bypassed under the assumption that two pieces of validation have
        passed and the package is already installed.

        .. warning::
            Do not try and use ``{{ salt['pkg.version']('ntp') }}`` in a jinja
            template as part of your bypass_file_contains match. This will
            trigger a ``pkg.version`` lookup with the package manager and
            negate any time saved by trying to use the bypass feature.
    :return: A dictionary containing the state of the software installation
    :rtype dict:

    .. note::
        The ``pkg.installed`` state supports the usage of ``reload_modules``.
        This functionality allows you to force Salt to reload all modules. In
        many cases, Salt is clever enough to transparently reload the modules.
        For example, if you install a package, Salt reloads modules because
        some other module or state might require the package which was
        installed. However, there are some edge cases where this may not be
        the case, which is what ``reload_modules`` is meant to resolve. You
        should only use ``reload_modules`` if your ``pkg.installed`` does some
        sort of installation where if you do not reload the modules future
        items in your state which rely on the software being installed will
        fail. Please see the :ref:`Reloading Modules <reloading-modules>`
        documentation for more information.
    '''
    # An explicitly empty pkgs list means there is nothing to do.
    if not pkgs and isinstance(pkgs, list):
        return {'name': name,
                'changes': {},
                'result': True,
                'comment': 'No packages to install provided'}

    # If just a name (and optionally a version) is passed, just pack them into
    # the pkgs argument.
    if name and not any((pkgs, sources)):
        if version:
            pkgs = [{name: version}]
            version = None
        else:
            pkgs = [name]

    kwargs['saltenv'] = __env__
    refresh = salt.utils.pkg.check_refresh(__opts__, refresh)

    # Fast path: when a bypass file (and optional marker string) is present,
    # return success immediately without querying the package manager at all.
    # Note that this also skips any version checking for the package.
    if bypass_file is not None and bypass_file_contains is not None:
        if os.path.isfile(bypass_file):
            # NOTE(review): salt.utils.fopen is the legacy helper -- confirm
            # it exists in this Salt version (newer releases moved it to
            # salt.utils.files.fopen).
            with salt.utils.fopen(bypass_file) as bypass_file_open:
                if bypass_file_contains in bypass_file_open.read():
                    return {'name': name,
                            'changes': {},
                            'result': True,
                            'comment': 'pkg.installed was bypassed as {} was present in {}'.format(bypass_file_contains, bypass_file)}
    if bypass_file is not None and bypass_file_contains is None:
        if os.path.isfile(bypass_file):
            return {'name': name,
                    'changes': {},
                    'result': True,
                    'comment': 'pkg.installed was bypassed as bypass_file {} was present'.format(bypass_file)}

    # check if capabilities should be checked and modify the requested packages
    # accordingly.
    if pkgs:
        pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs)

    # pkg_verify may be a bool or a list of options for pkg.verify; normalize
    # any other truthy/falsy value to a strict bool.
    if not isinstance(pkg_verify, list):
        pkg_verify = pkg_verify is True
    if (pkg_verify or isinstance(pkg_verify, list)) \
            and 'pkg.verify' not in __salt__:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'pkg.verify not implemented'}

    if not isinstance(version, six.string_types) and version is not None:
        version = six.text_type(version)

    kwargs['allow_updates'] = allow_updates

    result = _find_install_targets(name, version, pkgs, sources,
                                   fromrepo=fromrepo,
                                   skip_suggestions=skip_suggestions,
                                   pkg_verify=pkg_verify,
                                   normalize=normalize,
                                   ignore_epoch=ignore_epoch,
                                   reinstall=reinstall,
                                   refresh=refresh,
                                   **kwargs)

    try:
        # A tuple means targets were found; a non-tuple (dict) means
        # _find_install_targets returned an early state result instead.
        (desired, targets, to_unpurge, to_reinstall,
         altered_files, warnings, was_refreshed) = result
        if was_refreshed:
            refresh = False
    except ValueError:
        # _find_install_targets() found no targets or encountered an error

        # check that the hold function is available
        if 'pkg.hold' in __salt__ and 'hold' in kwargs:
            try:
                action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold'
                hold_ret = __salt__[action](
                    name=name, pkgs=pkgs, sources=sources
                )
            except (CommandExecutionError, SaltInvocationError) as exc:
                return {'name': name,
                        'changes': {},
                        'result': False,
                        'comment': six.text_type(exc)}

            if 'result' in hold_ret and not hold_ret['result']:
                return {'name': name,
                        'changes': {},
                        'result': False,
                        'comment': 'An error was encountered while '
                                   'holding/unholding package(s): {0}'
                                   .format(hold_ret['comment'])}
            else:
                # Partition per-package hold results so they can be folded
                # into the state result's comment/changes.
                modified_hold = [hold_ret[x] for x in hold_ret
                                 if hold_ret[x]['changes']]
                not_modified_hold = [hold_ret[x] for x in hold_ret
                                     if not hold_ret[x]['changes']
                                     and hold_ret[x]['result']]
                failed_hold = [hold_ret[x] for x in hold_ret
                               if not hold_ret[x]['result']]

                for i in modified_hold:
                    result['comment'] += '.\n{0}'.format(i['comment'])
                    result['result'] = i['result']
                    result['changes'][i['name']] = i['changes']

                for i in not_modified_hold:
                    result['comment'] += '.\n{0}'.format(i['comment'])
                    result['result'] = i['result']

                for i in failed_hold:
                    result['comment'] += '.\n{0}'.format(i['comment'])
                    result['result'] = i['result']
        return result

    if to_unpurge and 'lowpkg.unpurge' not in __salt__:
        ret = {'name': name,
               'changes': {},
               'result': False,
               'comment': 'lowpkg.unpurge not implemented'}
        if warnings:
            ret.setdefault('warnings', []).extend(warnings)
        return ret

    # Remove any targets not returned by _find_install_targets
    if pkgs:
        pkgs = [dict([(x, y)]) for x, y in six.iteritems(targets)]
        pkgs.extend([dict([(x, y)]) for x, y in six.iteritems(to_reinstall)])
    elif sources:
        oldsources = sources
        sources = [x for x in oldsources
                   if next(iter(list(x.keys()))) in targets]
        sources.extend([x for x in oldsources
                        if next(iter(list(x.keys()))) in to_reinstall])

    comment = []
    if __opts__['test']:
        # Test mode: report what would change, make no changes.
        if targets:
            if sources:
                summary = ', '.join(targets)
            else:
                summary = ', '.join([_get_desired_pkg(x, targets)
                                     for x in targets])
            comment.append('The following packages would be '
                           'installed/updated: {0}'.format(summary))
        if to_unpurge:
            comment.append(
                'The following packages would have their selection status '
                'changed from \'purge\' to \'install\': {0}'
                .format(', '.join(to_unpurge))
            )
        if to_reinstall:
            # Add a comment for each package in to_reinstall with its
            # pkg.verify output
            if reinstall:
                reinstall_targets = []
                for reinstall_pkg in to_reinstall:
                    if sources:
                        reinstall_targets.append(reinstall_pkg)
                    else:
                        reinstall_targets.append(
                            _get_desired_pkg(reinstall_pkg, to_reinstall)
                        )
                msg = 'The following packages would be reinstalled: '
                msg += ', '.join(reinstall_targets)
                comment.append(msg)
            else:
                for reinstall_pkg in to_reinstall:
                    if sources:
                        pkgstr = reinstall_pkg
                    else:
                        pkgstr = _get_desired_pkg(reinstall_pkg, to_reinstall)
                    comment.append(
                        'Package \'{0}\' would be reinstalled because the '
                        'following files have been altered:'.format(pkgstr)
                    )
                    comment.append(
                        _nested_output(altered_files[reinstall_pkg])
                    )
        ret = {'name': name,
               'changes': {},
               'result': None,
               'comment': '\n'.join(comment)}
        if warnings:
            ret.setdefault('warnings', []).extend(warnings)
        return ret

    changes = {'installed': {}}
    modified_hold = None
    not_modified_hold = None
    failed_hold = None
    if targets or to_reinstall:
        try:
            pkg_ret = __salt__['pkg.install'](name=None,
                                              refresh=refresh,
                                              version=version,
                                              fromrepo=fromrepo,
                                              skip_verify=skip_verify,
                                              pkgs=pkgs,
                                              sources=sources,
                                              reinstall=bool(to_reinstall),
                                              normalize=normalize,
                                              update_holds=update_holds,
                                              ignore_epoch=ignore_epoch,
                                              **kwargs)
        except CommandExecutionError as exc:
            ret = {'name': name, 'result': False}
            if exc.info:
                # Get information for state return from the exception.
                ret['changes'] = exc.info.get('changes', {})
                ret['comment'] = exc.strerror_without_changes
            else:
                ret['changes'] = {}
                ret['comment'] = ('An error was encountered while installing '
                                  'package(s): {0}'.format(exc))
            if warnings:
                ret.setdefault('warnings', []).extend(warnings)
            return ret

        if refresh:
            refresh = False

        if isinstance(pkg_ret, dict):
            changes['installed'].update(pkg_ret)
        elif isinstance(pkg_ret, six.string_types):
            comment.append(pkg_ret)
            # Code below will be looking for a dictionary. If this is a string
            # it means that there was an exception raised and that no packages
            # changed, so now that we have added this error to the comments we
            # set this to an empty dictionary so that the code below which
            # checks reinstall targets works.
            pkg_ret = {}

    if 'pkg.hold' in __salt__ and 'hold' in kwargs:
        try:
            action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold'
            hold_ret = __salt__[action](
                name=name, pkgs=desired
            )
        except (CommandExecutionError, SaltInvocationError) as exc:
            comment.append(six.text_type(exc))
            ret = {'name': name,
                   'changes': changes,
                   'result': False,
                   'comment': '\n'.join(comment)}
            if warnings:
                ret.setdefault('warnings', []).extend(warnings)
            return ret
        else:
            if 'result' in hold_ret and not hold_ret['result']:
                ret = {'name': name,
                       'changes': {},
                       'result': False,
                       'comment': 'An error was encountered while '
                                  'holding/unholding package(s): {0}'
                                  .format(hold_ret['comment'])}
                if warnings:
                    ret.setdefault('warnings', []).extend(warnings)
                return ret
            else:
                modified_hold = [hold_ret[x] for x in hold_ret
                                 if hold_ret[x]['changes']]
                not_modified_hold = [hold_ret[x] for x in hold_ret
                                     if not hold_ret[x]['changes']
                                     and hold_ret[x]['result']]
                failed_hold = [hold_ret[x] for x in hold_ret
                               if not hold_ret[x]['result']]

    if to_unpurge:
        changes['purge_desired'] = __salt__['lowpkg.unpurge'](*to_unpurge)

    # Analyze pkg.install results for packages in targets
    if sources:
        modified = [x for x in changes['installed'] if x in targets]
        not_modified = [x for x in desired
                        if x not in targets
                        and x not in to_reinstall]
        failed = [x for x in targets if x not in modified]
    else:
        if __grains__['os'] == 'FreeBSD':
            kwargs['with_origin'] = True
        new_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
        if kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__:
            new_caps = __salt__['pkg.list_provides'](**kwargs)
        else:
            new_caps = {}
        ok, failed = _verify_install(desired, new_pkgs,
                                     ignore_epoch=ignore_epoch,
                                     new_caps=new_caps)
        modified = [x for x in ok if x in targets]
        not_modified = [x for x in ok
                        if x not in targets
                        and x not in to_reinstall]
        failed = [x for x in failed if x in targets]

    # If there was nothing unpurged, just set the changes dict to the contents
    # of changes['installed'].
    if not changes.get('purge_desired'):
        changes = changes['installed']

    if modified:
        if sources:
            summary = ', '.join(modified)
        else:
            summary = ', '.join([_get_desired_pkg(x, desired)
                                 for x in modified])
        # NOTE(review): this compares the length of the summary *string*,
        # whereas the not_modified branch below compares the package *count*
        # (len(not_modified) <= 20) -- confirm this difference is intentional.
        if len(summary) < 20:
            comment.append('The following packages were installed/updated: '
                           '{0}'.format(summary))
        else:
            comment.append(
                '{0} targeted package{1} {2} installed/updated.'.format(
                    len(modified),
                    's' if len(modified) > 1 else '',
                    'were' if len(modified) > 1 else 'was'
                )
            )
        if modified_hold:
            for i in modified_hold:
                change_name = i['name']
                if change_name in changes:
                    comment.append(i['comment'])
                    if changes[change_name]['new']:
                        changes[change_name]['new'] += '\n'
                    changes[change_name]['new'] += '{0}'.format(i['changes']['new'])
                    if changes[change_name]['old']:
                        changes[change_name]['old'] += '\n'
                    changes[change_name]['old'] += '{0}'.format(i['changes']['old'])
                else:
                    comment.append(i['comment'])
                    changes[change_name] = {}
                    changes[change_name]['new'] = '{0}'.format(i['changes']['new'])

    # Any requested packages that were not targeted for install or reinstall
    if not_modified:
        if sources:
            summary = ', '.join(not_modified)
        else:
            summary = ', '.join([_get_desired_pkg(x, desired)
                                 for x in not_modified])
        if len(not_modified) <= 20:
            comment.append('The following packages were already installed: '
                           '{0}'.format(summary))
        else:
            comment.append(
                '{0} targeted package{1} {2} already installed'.format(
                    len(not_modified),
                    's' if len(not_modified) > 1 else '',
                    'were' if len(not_modified) > 1 else 'was'
                )
            )
        if not_modified_hold:
            for i in not_modified_hold:
                comment.append(i['comment'])

    result = True

    if failed:
        if sources:
            summary = ', '.join(failed)
        else:
            summary = ', '.join([_get_desired_pkg(x, desired)
                                 for x in failed])
        comment.insert(0, 'The following packages failed to '
                          'install/update: {0}'.format(summary))
        result = False

    if failed_hold:
        for i in failed_hold:
            comment.append(i['comment'])
        result = False

    # Get the ignore_types list if any from the pkg_verify argument
    if isinstance(pkg_verify, list) \
            and any(x.get('ignore_types') is not None
                    for x in pkg_verify
                    if isinstance(x, _OrderedDict)
                    and 'ignore_types' in x):
        ignore_types = next(x.get('ignore_types')
                            for x in pkg_verify
                            if 'ignore_types' in x)
    else:
        ignore_types = []

    # Get the verify_options list if any from the pkg_verify argument
    if isinstance(pkg_verify, list) \
            and any(x.get('verify_options') is not None
                    for x in pkg_verify
                    if isinstance(x, _OrderedDict)
                    and 'verify_options' in x):
        verify_options = next(x.get('verify_options')
                              for x in pkg_verify
                              if 'verify_options' in x)
    else:
        verify_options = []

    # Rerun pkg.verify for packages in to_reinstall to determine failed
    modified = []
    failed = []
    for reinstall_pkg in to_reinstall:
        if reinstall:
            if reinstall_pkg in pkg_ret:
                modified.append(reinstall_pkg)
            else:
                failed.append(reinstall_pkg)
        elif pkg_verify:
            # No need to wrap this in a try/except because we would already
            # have caught invalid arguments earlier.
            verify_result = __salt__['pkg.verify'](reinstall_pkg,
                                                   ignore_types=ignore_types,
                                                   verify_options=verify_options,
                                                   **kwargs)
            if verify_result:
                failed.append(reinstall_pkg)
                altered_files[reinstall_pkg] = verify_result
            else:
                modified.append(reinstall_pkg)

    if modified:
        # Add a comment for each package in modified with its pkg.verify output
        for modified_pkg in modified:
            if sources:
                pkgstr = modified_pkg
            else:
                pkgstr = _get_desired_pkg(modified_pkg, desired)
            msg = 'Package {0} was reinstalled.'.format(pkgstr)
            if modified_pkg in altered_files:
                msg += ' The following files were remediated:'
                comment.append(msg)
                comment.append(_nested_output(altered_files[modified_pkg]))
            else:
                comment.append(msg)

    if failed:
        # Add a comment for each package in failed with its pkg.verify output
        for failed_pkg in failed:
            if sources:
                pkgstr = failed_pkg
            else:
                pkgstr = _get_desired_pkg(failed_pkg, desired)
            msg = ('Reinstall was not successful for package {0}.'
                   .format(pkgstr))
            if failed_pkg in altered_files:
                msg += ' The following files could not be remediated:'
                comment.append(msg)
                comment.append(_nested_output(altered_files[failed_pkg]))
            else:
                comment.append(msg)
        result = False

    ret = {'name': name,
           'changes': changes,
           'result': result,
           'comment': '\n'.join(comment)}
    if warnings:
        ret.setdefault('warnings', []).extend(warnings)
    return ret


def downloaded(name,
               version=None,
               pkgs=None,
               fromrepo=None,
               ignore_epoch=None,
               **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that the package is downloaded, and that it is the correct version
    (if specified).

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    :param str name:
        The name of the package to be downloaded. This parameter is ignored if
        either "pkgs" is used. Additionally, please note that this option can
        only be used to download packages from a software repository.

    :param str version:
        Download a specific version of a package.

        ..
    important:: As of version 2015.8.7, for distros which use yum/dnf,
        packages which have a version with a nonzero epoch (that is, versions
        which start with a number followed by a colon) must have the epoch
        included when specifying the version number. For example:

        .. code-block:: yaml

            vim-enhanced:
              pkg.downloaded:
                - version: 2:7.4.160-1.el7

        An **ignore_epoch** argument has been added to which causes the epoch
        to be disregarded when the state checks to see if the desired version
        was installed.

        You can install a specific version when using the ``pkgs`` argument by
        including the version after the package:

        .. code-block:: yaml

            common_packages:
              pkg.downloaded:
                - pkgs:
                  - unzip
                  - dos2unix
                  - salt-minion: 2015.8.5-1.el6

    :param bool resolve_capabilities:
        Turn on resolving capabilities. This allow one to name "provides" or
        alias names for packages.

        .. versionadded:: 2018.3.0

    CLI Example:

    .. code-block:: yaml

        zsh:
          pkg.downloaded:
            - version: 5.0.5-4.63
            - fromrepo: "myrepository"
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    if 'pkg.list_downloaded' not in __salt__:
        ret['result'] = False
        ret['comment'] = 'The pkg.downloaded state is not available on ' \
                         'this platform'
        return ret

    # An explicitly empty pkgs list means there is nothing to do.
    if not pkgs and isinstance(pkgs, list):
        ret['result'] = True
        ret['comment'] = 'No packages to download provided'
        return ret

    # If just a name (and optionally a version) is passed, just pack them into
    # the pkgs argument.
    if name and not pkgs:
        if version:
            pkgs = [{name: version}]
            version = None
        else:
            pkgs = [name]

    # It doesn't make sense here to received 'downloadonly' as kwargs
    # as we're explicitly passing 'downloadonly=True' to execution module.
    if 'downloadonly' in kwargs:
        del kwargs['downloadonly']

    pkgs, _refresh = _resolve_capabilities(pkgs, **kwargs)

    # Only downloading not yet downloaded packages
    targets = _find_download_targets(name,
                                     version,
                                     pkgs,
                                     fromrepo=fromrepo,
                                     ignore_epoch=ignore_epoch,
                                     **kwargs)
    # A dict with a 'result' key is an early state return; any other non-dict
    # value indicates an error while computing targets.
    if isinstance(targets, dict) and 'result' in targets:
        return targets
    elif not isinstance(targets, dict):
        ret['result'] = False
        ret['comment'] = 'An error was encountered while checking targets: ' \
                         '{0}'.format(targets)
        return ret

    if __opts__['test']:
        summary = ', '.join(targets)
        ret['comment'] = 'The following packages would be ' \
                         'downloaded: {0}'.format(summary)
        return ret

    try:
        pkg_ret = __salt__['pkg.install'](name=name,
                                          pkgs=pkgs,
                                          version=version,
                                          downloadonly=True,
                                          fromrepo=fromrepo,
                                          ignore_epoch=ignore_epoch,
                                          **kwargs)
        ret['result'] = True
        ret['changes'].update(pkg_ret)
    except CommandExecutionError as exc:
        ret = {'name': name, 'result': False}
        if exc.info:
            # Get information for state return from the exception.
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = 'An error was encountered while downloading ' \
                             'package(s): {0}'.format(exc)
        return ret

    new_pkgs = __salt__['pkg.list_downloaded'](**kwargs)
    ok, failed = _verify_install(targets, new_pkgs, ignore_epoch=ignore_epoch)

    if failed:
        summary = ', '.join([_get_desired_pkg(x, targets)
                             for x in failed])
        ret['result'] = False
        ret['comment'] = 'The following packages failed to ' \
                         'download: {0}'.format(summary)

    if not ret['changes'] and not ret['comment']:
        ret['result'] = True
        ret['comment'] = 'Packages are already downloaded: ' \
                         '{0}'.format(', '.join(targets))

    return ret


def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that packages related to certain advisory ids are installed.
Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` CLI Example: .. code-block:: yaml issue-foo-fixed: pkg.patch_installed: - advisory_ids: - SUSE-SLE-SERVER-12-SP2-2017-185 - SUSE-SLE-SERVER-12-SP2-2017-150 - SUSE-SLE-SERVER-12-SP2-2017-120 ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} if 'pkg.list_patches' not in __salt__: ret['result'] = False ret['comment'] = 'The pkg.patch_installed state is not available on ' \ 'this platform' return ret if not advisory_ids and isinstance(advisory_ids, list): ret['result'] = True ret['comment'] = 'No advisory ids provided' return ret # Only downloading not yet downloaded packages targets = _find_advisory_targets(name, advisory_ids, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, list): ret['result'] = False ret['comment'] = 'An error was encountered while checking targets: ' \ '{0}'.format(targets) return ret if __opts__['test']: summary = ', '.join(targets) ret['comment'] = 'The following advisory patches would be ' \ 'downloaded: {0}'.format(summary) return ret try: pkg_ret = __salt__['pkg.install'](name=name, advisory_ids=advisory_ids, downloadonly=downloadonly, **kwargs) ret['result'] = True ret['changes'].update(pkg_ret) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while downloading ' 'package(s): {0}'.format(exc)) return ret if not ret['changes'] and not ret['comment']: status = 'downloaded' if downloadonly else 'installed' ret['result'] = True ret['comment'] = ('Advisory patch is not needed or related packages ' 'are already {0}'.format(status)) return ret def patch_downloaded(name, advisory_ids=None, **kwargs): ''' .. versionadded:: 2017.7.0 Ensure that packages related to certain advisory ids are downloaded. Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` CLI Example: .. code-block:: yaml preparing-to-fix-issues: pkg.patch_downloaded: - advisory_ids: - SUSE-SLE-SERVER-12-SP2-2017-185 - SUSE-SLE-SERVER-12-SP2-2017-150 - SUSE-SLE-SERVER-12-SP2-2017-120 ''' if 'pkg.list_patches' not in __salt__: return {'name': name, 'result': False, 'changes': {}, 'comment': 'The pkg.patch_downloaded state is not available on ' 'this platform'} # It doesn't make sense here to received 'downloadonly' as kwargs # as we're explicitly passing 'downloadonly=True' to execution module. if 'downloadonly' in kwargs: del kwargs['downloadonly'] return patch_installed(name=name, advisory_ids=advisory_ids, downloadonly=True, **kwargs) def latest( name, refresh=None, fromrepo=None, skip_verify=False, pkgs=None, watch_flags=True, **kwargs): ''' Ensure that the named package is installed and the latest available package. If the package can be updated, this state function will update the package. Generally it is better for the :mod:`installed <salt.states.pkg.installed>` function to be used, as :mod:`latest <salt.states.pkg.latest>` will update the package whenever a new package is available. name The name of the package to maintain at the latest available version. This parameter is ignored if "pkgs" is used. 
fromrepo Specify a repository from which to install skip_verify Skip the GPG verification check for the package to be installed refresh This parameter controls whether or not the package repo database is updated prior to checking for the latest available version of the requested packages. If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before checking for the latest available version of the requested packages. If ``False``, the package database will *not* be refreshed before checking. If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.latest: - refresh: True - cache_valid_time: 300 In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 Multiple Package Installation Options: (Not yet supported for: FreeBSD, OpenBSD, MacOS, and Solaris pkgutil) pkgs A list of packages to maintain at the latest available version. .. 
code-block:: yaml mypkgs: pkg.latest: - pkgs: - foo - bar - baz install_recommends Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - install_recommends: False only_upgrade Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. report_reboot_exit_codes If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. code-block:: yaml ms vcpp installed: pkg.latest: - name: ms-vcpp - report_reboot_exit_codes: False ''' refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if kwargs.get('sources'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'The "sources" parameter is not supported.'} elif pkgs: desired_pkgs = list(_repack_pkgs(pkgs).keys()) if not desired_pkgs: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted "pkgs" parameter. See ' 'minion log.'} else: if not pkgs and isinstance(pkgs, list): return { 'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided' } else: desired_pkgs = [name] kwargs['saltenv'] = __env__ # check if capabilities should be checked and modify the requested packages # accordingly. 
desired_pkgs, refresh = _resolve_capabilities(desired_pkgs, refresh=refresh, **kwargs) try: avail = __salt__['pkg.latest_version'](*desired_pkgs, fromrepo=fromrepo, refresh=refresh, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking the ' 'newest available version of package(s): {0}' .format(exc)} try: cur = __salt__['pkg.version'](*desired_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': exc.strerror} # Repack the cur/avail data if only a single package is being checked if isinstance(cur, six.string_types): cur = {desired_pkgs[0]: cur} if isinstance(avail, six.string_types): avail = {desired_pkgs[0]: avail} targets = {} problems = [] for pkg in desired_pkgs: if not avail.get(pkg): # Package either a) is up-to-date, or b) does not exist if not cur.get(pkg): # Package does not exist msg = 'No information found for \'{0}\'.'.format(pkg) log.error(msg) problems.append(msg) elif watch_flags \ and __grains__.get('os_family') == 'Gentoo' \ and __salt__['portage_config.is_changed_uses'](pkg): # Package is up-to-date, but Gentoo USE flags are changing so # we need to add it to the targets targets[pkg] = cur[pkg] else: # Package either a) is not installed, or b) is installed and has an # upgrade available targets[pkg] = avail[pkg] if problems: return { 'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems) } if targets: # Find up-to-date packages if not pkgs: # There couldn't have been any up-to-date packages if this state # only targeted a single package and is being allowed to proceed to # the install step. 
up_to_date = [] else: up_to_date = [x for x in pkgs if x not in targets] if __opts__['test']: comments = [] comments.append( 'The following packages would be installed/upgraded: ' + ', '.join(sorted(targets)) ) if up_to_date: up_to_date_count = len(up_to_date) if up_to_date_count <= 10: comments.append( 'The following packages are already up-to-date: ' + ', '.join( ['{0} ({1})'.format(x, cur[x]) for x in sorted(up_to_date)] ) ) else: comments.append( '{0} packages are already up-to-date' .format(up_to_date_count) ) return {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comments)} if salt.utils.platform.is_windows(): # pkg.install execution module on windows ensures the software # package is installed when no version is specified, it does not # upgrade the software to the latest. This is per the design. # Build updated list of pkgs *with verion number*, exclude # non-targeted ones targeted_pkgs = [{x: targets[x]} for x in targets] else: # Build updated list of pkgs to exclude non-targeted ones targeted_pkgs = list(targets) # No need to refresh, if a refresh was necessary it would have been # performed above when pkg.latest_version was run. 
try: changes = __salt__['pkg.install'](name=None, refresh=False, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=targeted_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while installing ' 'package(s): {0}'.format(exc)} if changes: # Find failed and successful updates failed = [x for x in targets if not changes.get(x) or changes[x].get('new') != targets[x] and targets[x] != 'latest'] successful = [x for x in targets if x not in failed] comments = [] if failed: msg = 'The following packages failed to update: ' \ '{0}'.format(', '.join(sorted(failed))) comments.append(msg) if successful: msg = 'The following packages were successfully ' \ 'installed/upgraded: ' \ '{0}'.format(', '.join(sorted(successful))) comments.append(msg) if up_to_date: if len(up_to_date) <= 10: msg = 'The following packages were already up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: msg = '{0} packages were already up-to-date '.format( len(up_to_date)) comments.append(msg) return {'name': name, 'changes': changes, 'result': False if failed else True, 'comment': ' '.join(comments)} else: if len(targets) > 10: comment = ('{0} targeted packages failed to update. ' 'See debug log for details.'.format(len(targets))) elif len(targets) > 1: comment = ('The following targeted packages failed to update. ' 'See debug log for details: ({0}).' 
.format(', '.join(sorted(targets)))) else: comment = 'Package {0} failed to ' \ 'update.'.format(next(iter(list(targets.keys())))) if up_to_date: if len(up_to_date) <= 10: comment += ' The following packages were already ' \ 'up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: comment += '{0} packages were already ' \ 'up-to-date'.format(len(up_to_date)) return {'name': name, 'changes': changes, 'result': False, 'comment': comment} else: if len(desired_pkgs) > 10: comment = 'All {0} packages are up-to-date.'.format( len(desired_pkgs)) elif len(desired_pkgs) > 1: comment = 'All packages are up-to-date ' \ '({0}).'.format(', '.join(sorted(desired_pkgs))) else: comment = 'Package {0} is already ' \ 'up-to-date'.format(desired_pkgs[0]) return {'name': name, 'changes': {}, 'result': True, 'comment': comment} def _uninstall( action='remove', name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Common function for package removal ''' if action not in ('remove', 'purge'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalid action \'{0}\'. 
' 'This is probably a bug.'.format(action)} try: pkg_params = __salt__['pkg_resource.parse_targets']( name, pkgs, normalize=normalize)[0] except MinionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while parsing targets: ' '{0}'.format(exc)} targets = _find_remove_targets(name, version, pkgs, normalize, ignore_epoch=ignore_epoch, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, list): return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking targets: ' '{0}'.format(targets)} if action == 'purge': old_removed = __salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) targets.extend([x for x in pkg_params if x in old_removed]) targets.sort() if not targets: return {'name': name, 'changes': {}, 'result': True, 'comment': 'None of the targeted packages are installed' '{0}'.format(' or partially installed' if action == 'purge' else '')} if __opts__['test']: return {'name': name, 'changes': {}, 'result': None, 'comment': 'The following packages will be {0}d: ' '{1}.'.format(action, ', '.join(targets))} changes = __salt__['pkg.{0}'.format(action)](name, pkgs=pkgs, version=version, **kwargs) new = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) failed = [] for x in pkg_params: if __grains__['os_family'] in ['Suse', 'RedHat']: # Check if the package version set to be removed is actually removed: if x in new and not pkg_params[x]: failed.append(x) elif x in new and pkg_params[x] in new[x]: failed.append(x + "-" + pkg_params[x]) elif x in new: failed.append(x) if action == 'purge': new_removed = __salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) failed.extend([x for x in pkg_params if x in new_removed]) failed.sort() if failed: return {'name': name, 'changes': changes, 'result': False, 'comment': 'The following packages failed to {0}: ' '{1}.'.format(action, ', 
'.join(failed))} comments = [] not_installed = sorted([x for x in pkg_params if x not in targets]) if not_installed: comments.append('The following packages were not installed: ' '{0}'.format(', '.join(not_installed))) comments.append('The following packages were {0}d: ' '{1}.'.format(action, ', '.join(targets))) else: comments.append('All targeted packages were {0}d.'.format(action)) return {'name': name, 'changes': changes, 'result': True, 'comment': ' '.join(comments)} def removed(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.remove`` if necessary to remove the package. name The name of the package to be removed. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.removed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to remove. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='remove', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while removing ' 'package(s): {0}'.format(exc)) return ret def purged(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.purge`` if necessary to purge the package. 
All configuration files are also removed. name The name of the package to be purged. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. 
code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.purged: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to purge. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='purge', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while purging ' 'package(s): {0}'.format(exc)) return ret def uptodate(name, refresh=False, pkgs=None, **kwargs): ''' .. versionadded:: 2014.7.0 .. versionchanged:: 2018.3.0 Added support for the ``pkgin`` provider. Verify that the system is completely up to date. name The name has no functional value and is only used as a tracking reference refresh refresh the package database before checking for new upgrades pkgs list of packages to upgrade :param str cache_valid_time: This parameter sets the value in seconds after which cache marked as invalid, and cache update is necessary. This overwrite ``refresh`` parameter default behavior. In this case cache_valid_time is set, refresh will not take place for amount in seconds since last ``apt-get update`` executed on the system. .. 
note:: This parameter available only on Debian based distributions, and have no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 kwargs Any keyword arguments to pass through to ``pkg.upgrade``. .. versionadded:: 2015.5.0 ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'Failed to update'} if 'pkg.list_upgrades' not in __salt__: ret['comment'] = 'State pkg.uptodate is not available' return ret # emerge --update doesn't appear to support repo notation if 'fromrepo' in kwargs and __grains__['os_family'] == 'Gentoo': ret['comment'] = '\'fromrepo\' argument not supported on this platform' return ret if isinstance(refresh, bool): pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) try: packages = __salt__['pkg.list_upgrades'](refresh=refresh, **kwargs) expected = {pkgname: {'new': pkgver, 'old': __salt__['pkg.version'](pkgname, **kwargs)} for pkgname, pkgver in six.iteritems(packages)} if isinstance(pkgs, list): packages = [pkg for pkg in packages if pkg in pkgs] expected = {pkgname: pkgver for pkgname, pkgver in six.iteritems(expected) if pkgname in pkgs} except Exception as exc: ret['comment'] = six.text_type(exc) return ret else: ret['comment'] = 'refresh must be either True or False' return ret if not packages: ret['comment'] = 'System is already up-to-date' ret['result'] = True return ret elif __opts__['test']: ret['comment'] = 'System update will be performed' ret['changes'] = expected ret['result'] = None return ret try: ret['changes'] = __salt__['pkg.upgrade'](refresh=refresh, pkgs=pkgs, **kwargs) except CommandExecutionError as exc: if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while updating ' 'packages: {0}'.format(exc)) return ret # If a package list was provided, ensure those packages were updated missing = [] if isinstance(pkgs, list): missing = [pkg for pkg in six.iterkeys(expected) if pkg not in ret['changes']] if missing: ret['comment'] = 'The following package(s) failed to update: {0}'.format(', '.join(missing)) ret['result'] = False else: ret['comment'] = 'Upgrade ran successfully' ret['result'] = True return ret def group_installed(name, skip=None, include=None, **kwargs): ''' .. versionadded:: 2015.8.0 .. versionchanged:: 2016.11.0 Added support in :mod:`pacman <salt.modules.pacman>` Ensure that an entire package group is installed. This state is currently only supported for the :mod:`yum <salt.modules.yumpkg>` and :mod:`pacman <salt.modules.pacman>` package managers. skip Packages that would normally be installed by the package group ("default" packages), which should not be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - skip: - piranha include Packages which are included in a group, which would not normally be installed by a ``yum groupinstall`` ("optional" packages). Note that this will not enforce group membership; if you include packages which are not members of the specified groups, they will still be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - include: - haproxy .. versionchanged:: 2016.3.0 This option can no longer be passed as a comma-separated list, it must now be passed as a list (as shown in the above example). .. note:: Because this is essentially a wrapper around :py:func:`pkg.install <salt.modules.yumpkg.install>`, any argument which can be passed to pkg.install may also be included here, and it will be passed on to the call to :py:func:`pkg.install <salt.modules.yumpkg.install>`. 
''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} if 'pkg.group_diff' not in __salt__: ret['comment'] = 'pkg.group_install not available for this platform' return ret if skip is None: skip = [] else: if not isinstance(skip, list): ret['comment'] = 'skip must be formatted as a list' return ret for idx, item in enumerate(skip): if not isinstance(item, six.string_types): skip[idx] = six.text_type(item) if include is None: include = [] else: if not isinstance(include, list): ret['comment'] = 'include must be formatted as a list' return ret for idx, item in enumerate(include): if not isinstance(item, six.string_types): include[idx] = six.text_type(item) try: diff = __salt__['pkg.group_diff'](name) except CommandExecutionError as err: ret['comment'] = ('An error was encountered while installing/updating ' 'group \'{0}\': {1}.'.format(name, err)) return ret mandatory = diff['mandatory']['installed'] + \ diff['mandatory']['not installed'] invalid_skip = [x for x in mandatory if x in skip] if invalid_skip: ret['comment'] = ( 'The following mandatory packages cannot be skipped: {0}' .format(', '.join(invalid_skip)) ) return ret targets = diff['mandatory']['not installed'] targets.extend([x for x in diff['default']['not installed'] if x not in skip]) targets.extend(include) if not targets: ret['result'] = True ret['comment'] = 'Group \'{0}\' is already installed'.format(name) return ret partially_installed = diff['mandatory']['installed'] \ or diff['default']['installed'] \ or diff['optional']['installed'] if __opts__['test']: ret['result'] = None if partially_installed: ret['comment'] = ( 'Group \'{0}\' is partially installed and will be updated' .format(name) ) else: ret['comment'] = 'Group \'{0}\' will be installed'.format(name) return ret try: ret['changes'] = __salt__['pkg.install'](pkgs=targets, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while ' 'installing/updating group \'{0}\': {1}' .format(name, exc)) return ret failed = [x for x in targets if x not in __salt__['pkg.list_pkgs'](**kwargs)] if failed: ret['comment'] = ( 'Failed to install the following packages: {0}' .format(', '.join(failed)) ) return ret ret['result'] = True ret['comment'] = 'Group \'{0}\' was {1}'.format( name, 'updated' if partially_installed else 'installed' ) return ret def mod_init(low): ''' Set a flag to tell the install functions to refresh the package database. This ensures that the package database is refreshed only once during a state run significantly improving the speed of package management during a state run. It sets a flag for a number of reasons, primarily due to timeline logic. When originally setting up the mod_init for pkg a number of corner cases arose with different package managers and how they refresh package data. It also runs the "ex_mod_init" from the package manager module that is currently loaded. The "ex_mod_init" is expected to work as a normal "mod_init" function. .. 
seealso:: :py:func:`salt.modules.ebuild.ex_mod_init` ''' ret = True if 'pkg.ex_mod_init' in __salt__: ret = __salt__['pkg.ex_mod_init'](low) if low['fun'] == 'installed' or low['fun'] == 'latest': salt.utils.pkg.write_rtag(__opts__) return ret return False def mod_aggregate(low, chunks, running): ''' The mod_aggregate function which looks up all packages in the available low chunks and merges them into a single pkgs ref in the present low data ''' pkgs = [] pkg_type = None agg_enabled = [ 'installed', 'latest', 'removed', 'purged', ] if low.get('fun') not in agg_enabled: return low for chunk in chunks: tag = __utils__['state.gen_tag'](chunk) if tag in running: # Already ran the pkg state, skip aggregation continue if chunk.get('state') == 'pkg': if '__agg__' in chunk: continue # Check for the same function if chunk.get('fun') != low.get('fun'): continue # Check for the same repo if chunk.get('fromrepo') != low.get('fromrepo'): continue # Check first if 'sources' was passed so we don't aggregate pkgs # and sources together. if 'sources' in chunk: if pkg_type is None: pkg_type = 'sources' if pkg_type == 'sources': pkgs.extend(chunk['sources']) chunk['__agg__'] = True else: if pkg_type is None: pkg_type = 'pkgs' if pkg_type == 'pkgs': # Pull out the pkg names! if 'pkgs' in chunk: pkgs.extend(chunk['pkgs']) chunk['__agg__'] = True elif 'name' in chunk: version = chunk.pop('version', None) if version is not None: pkgs.append({chunk['name']: version}) else: pkgs.append(chunk['name']) chunk['__agg__'] = True if pkg_type is not None and pkgs: if pkg_type in low: low[pkg_type].extend(pkgs) else: low[pkg_type] = pkgs return low def mod_watch(name, **kwargs): ''' Install/reinstall a package based on a watch requisite .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered. 
''' sfun = kwargs.pop('sfun', None) mapfun = {'purged': purged, 'latest': latest, 'removed': removed, 'installed': installed} if sfun in mapfun: return mapfun[sfun](name, **kwargs) return {'name': name, 'changes': {}, 'comment': 'pkg.{0} does not work with the watch requisite'.format(sfun), 'result': False}
saltstack/salt
salt/states/pkg.py
_find_download_targets
python
def _find_download_targets(name=None,
                           version=None,
                           pkgs=None,
                           normalize=True,
                           skip_suggestions=False,
                           ignore_epoch=False,
                           **kwargs):
    '''
    Inspect the arguments to pkg.downloaded and discover what packages need to
    be downloaded. Return a dict of packages to download
    (``{pkgname: version}``), or — when nothing needs to be done or the input
    is invalid — a complete state return dict with ``name``/``changes``/
    ``result``/``comment`` keys.

    name
        Single package name to download. Ignored when ``pkgs`` is given.
        NOTE(review): in the single-name branch below, the already-downloaded
        check keys ``cur_pkgs`` on the raw ``name`` even when ``normalize``
        rewrote the key stored in ``to_download`` — presumably the two agree
        for most providers; confirm against ``pkg.normalize_name``.

    version
        Desired version (or comparison expression) for ``name``.

    pkgs
        List of packages (optionally ``{name: version}`` mappings), repacked
        into a flat dict via ``_repack_pkgs``.

    normalize
        Run package names through the provider's ``pkg.normalize_name``
        (e.g. to strip architecture suffixes) when available.

    skip_suggestions
        When False, run a platform-specific pre-flight check that reports
        unknown package names and possible alternatives.

    ignore_epoch
        Passed through to version comparison; disregard a package's epoch
        when matching versions.

    kwargs
        Forwarded to ``pkg.list_downloaded`` and ``_preflight_check``.
    '''
    # Versions already present in the local download cache, as reported by
    # the provider: {pkgname: {version: path_info}}.
    cur_pkgs = __salt__['pkg.list_downloaded'](**kwargs)
    if pkgs:
        # Multi-package form: flatten the SLS 'pkgs' list into {name: version}.
        to_download = _repack_pkgs(pkgs, normalize=normalize)

        if not to_download:
            # Badly-formatted SLS
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'Invalidly formatted pkgs parameter. See '
                               'minion log.'}
    else:
        # Single-package form: build a one-entry mapping, normalizing the
        # name when the provider supports it (fall back to identity).
        if normalize:
            _normalize_name = \
                __salt__.get('pkg.normalize_name', lambda pkgname: pkgname)
            to_download = {_normalize_name(name): version}
        else:
            to_download = {name: version}

        # Short-circuit when the requested package/version is already cached.
        cver = cur_pkgs.get(name, {})
        if name in to_download:
            # Package already downloaded, no need to download again
            if cver and version in cver:
                return {'name': name,
                        'changes': {},
                        'result': True,
                        'comment': 'Version {0} of package \'{1}\' is already '
                                   'downloaded'.format(version, name)}

            # if cver is not an empty string, the package is already downloaded
            elif cver and version is None:
                # The package is downloaded
                return {'name': name,
                        'changes': {},
                        'result': True,
                        'comment': 'Package {0} is already '
                                   'downloaded'.format(name)}

    # Tracks whether any explicit version condition was evaluated; only used
    # to refine the "already downloaded" comment at the bottom.
    version_spec = False
    if not skip_suggestions:
        # Best-effort name validation: a failed check_db call is ignored
        # (EAFP), but a successful one that finds unknown names fails the
        # state with suggestions where available.
        try:
            problems = _preflight_check(to_download, **kwargs)
        except CommandExecutionError:
            pass
        else:
            comments = []
            if problems.get('no_suggest'):
                comments.append(
                    'The following package(s) were not found, and no '
                    'possible matches were found in the package db: '
                    '{0}'.format(
                        ', '.join(sorted(problems['no_suggest']))
                    )
                )
            if problems.get('suggest'):
                for pkgname, suggestions in \
                        six.iteritems(problems['suggest']):
                    comments.append(
                        'Package \'{0}\' not found (possible matches: '
                        '{1})'.format(pkgname, ', '.join(suggestions))
                    )
            if comments:
                if len(comments) > 1:
                    # Trailing empty entry makes the joined comment end in
                    # a period when there are multiple problems.
                    comments.append('')
                return {'name': name,
                        'changes': {},
                        'result': False,
                        'comment': '. '.join(comments).rstrip()}

    # Find out which packages will be targeted in the call to pkg.download
    # Check current downloaded versions against specified versions
    targets = {}
    problems = []
    for pkgname, pkgver in six.iteritems(to_download):
        cver = cur_pkgs.get(pkgname, {})
        # Package not yet downloaded, so add to targets
        if not cver:
            targets[pkgname] = pkgver
            continue
        # No version specified but package is already downloaded
        elif cver and not pkgver:
            continue

        version_spec = True
        try:
            # cver.keys() is the set of cached versions; target only when
            # none of them satisfies the requested version expression.
            if not _fulfills_version_string(cver.keys(), pkgver,
                                            ignore_epoch=ignore_epoch):
                targets[pkgname] = pkgver
        except CommandExecutionError as exc:
            # Unparseable version condition; collect and report all of them.
            problems.append(exc.strerror)
            continue

    if problems:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': ' '.join(problems)}

    if not targets:
        # All specified packages are already downloaded
        msg = (
            'All specified packages{0} are already downloaded'
            .format(' (matching specified versions)' if version_spec else '')
        )
        return {'name': name,
                'changes': {},
                'result': True,
                'comment': msg}

    return targets
Inspect the arguments to pkg.downloaded and discover what packages need to be downloaded. Return a dict of packages to download.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pkg.py#L253-L372
null
# -*- coding: utf-8 -*- ''' Installation of packages using OS package managers such as yum or apt-get ========================================================================= .. note:: On minions running systemd>=205, as of version 2015.8.12, 2016.3.3, and 2016.11.0, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing the package manager commands spawned by Salt, when Salt updates itself (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.use_scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Salt can manage software packages via the pkg state module, packages can be set up to be installed, latest, removed and purged. Package management declarations are typically rather simple: .. code-block:: yaml vim: pkg.installed A more involved example involves pulling from a custom repository. .. code-block:: yaml base: pkgrepo.managed: - humanname: Logstash PPA - name: ppa:wolfnet/logstash - dist: precise - file: /etc/apt/sources.list.d/logstash.list - keyid: 28B04E4A - keyserver: keyserver.ubuntu.com logstash: pkg.installed: - fromrepo: ppa:wolfnet/logstash Multiple packages can also be installed with the use of the pkgs state module .. code-block:: yaml dotdeb.repo: pkgrepo.managed: - humanname: Dotdeb - name: deb http://packages.dotdeb.org wheezy-php55 all - dist: wheezy-php55 - file: /etc/apt/sources.list.d/dotbeb.list - keyid: 89DF5277 - keyserver: keys.gnupg.net - refresh_db: true php.packages: pkg.installed: - fromrepo: wheezy-php55 - pkgs: - php5-fpm - php5-cli - php5-curl .. 
warning:: Package names are currently case-sensitive. If the minion is using a package manager which is not case-sensitive (such as :mod:`pkgng <salt.modules.pkgng>`), then this state will fail if the proper case is not used. This will be addressed in a future release of Salt. ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import fnmatch import logging import os import re # Import Salt libs import salt.utils.pkg import salt.utils.platform import salt.utils.versions from salt.output import nested from salt.utils.functools import namespaced_function as _namespaced_function from salt.utils.odict import OrderedDict as _OrderedDict from salt.exceptions import ( CommandExecutionError, MinionError, SaltInvocationError ) from salt.modules.pkg_resource import _repack_pkgs # Import 3rd-party libs from salt.ext import six # pylint: disable=invalid-name _repack_pkgs = _namespaced_function(_repack_pkgs, globals()) if salt.utils.platform.is_windows(): # pylint: disable=import-error,no-name-in-module,unused-import from salt.ext.six.moves.urllib.parse import urlparse as _urlparse from salt.exceptions import SaltRenderError import collections import datetime import errno import time from functools import cmp_to_key # pylint: disable=import-error # pylint: enable=unused-import from salt.modules.win_pkg import _get_package_info from salt.modules.win_pkg import get_repo_data from salt.modules.win_pkg import _get_repo_details from salt.modules.win_pkg import _refresh_db_conditional from salt.modules.win_pkg import refresh_db from salt.modules.win_pkg import genrepo from salt.modules.win_pkg import _repo_process_pkg_sls from salt.modules.win_pkg import _get_latest_pkg_version from salt.modules.win_pkg import _reverse_cmp_pkg_versions _get_package_info = _namespaced_function(_get_package_info, globals()) get_repo_data = _namespaced_function(get_repo_data, globals()) _get_repo_details = \ _namespaced_function(_get_repo_details, globals()) 
_refresh_db_conditional = \ _namespaced_function(_refresh_db_conditional, globals()) refresh_db = _namespaced_function(refresh_db, globals()) genrepo = _namespaced_function(genrepo, globals()) _repo_process_pkg_sls = \ _namespaced_function(_repo_process_pkg_sls, globals()) _get_latest_pkg_version = \ _namespaced_function(_get_latest_pkg_version, globals()) _reverse_cmp_pkg_versions = \ _namespaced_function(_reverse_cmp_pkg_versions, globals()) # The following imports are used by the namespaced win_pkg funcs # and need to be included in their globals. # pylint: disable=import-error,unused-import from salt.utils.versions import LooseVersion # pylint: enable=import-error,unused-import # pylint: enable=invalid-name log = logging.getLogger(__name__) def __virtual__(): ''' Only make these states available if a pkg provider has been detected or assigned for this minion ''' return 'pkg.install' in __salt__ def _get_comparison_spec(pkgver): ''' Return a tuple containing the comparison operator and the version. If no comparison operator was passed, the comparison is assumed to be an "equals" comparison, and "==" will be the operator returned. ''' oper, verstr = salt.utils.pkg.split_comparison(pkgver.strip()) if oper in ('=', ''): oper = '==' return oper, verstr def _parse_version_string(version_conditions_string): ''' Returns a list of two-tuples containing (operator, version). ''' result = [] version_conditions_string = version_conditions_string.strip() if not version_conditions_string: return result for version_condition in version_conditions_string.split(','): operator_and_version = _get_comparison_spec(version_condition) result.append(operator_and_version) return result def _fulfills_version_string(installed_versions, version_conditions_string, ignore_epoch=False, allow_updates=False): ''' Returns True if any of the installed versions match the specified version conditions, otherwise returns False. 
installed_versions The installed versions version_conditions_string The string containing all version conditions. E.G. 1.2.3-4 >=1.2.3-4 >=1.2.3-4, <2.3.4-5 >=1.2.3-4, <2.3.4-5, !=1.2.4-1 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. allow_updates : False Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. (Only applicable if only one strict version condition is specified E.G. version: 2.0.6~ubuntu3) ''' version_conditions = _parse_version_string(version_conditions_string) for installed_version in installed_versions: fullfills_all = True for operator, version_string in version_conditions: if allow_updates and len(version_conditions) == 1 and operator == '==': operator = '>=' fullfills_all = fullfills_all and _fulfills_version_spec([installed_version], operator, version_string, ignore_epoch=ignore_epoch) if fullfills_all: return True return False def _fulfills_version_spec(versions, oper, desired_version, ignore_epoch=False): ''' Returns True if any of the installed versions match the specified version, otherwise returns False ''' cmp_func = __salt__.get('pkg.version_cmp') # stripping "with_origin" dict wrapper if salt.utils.platform.is_freebsd(): if isinstance(versions, dict) and 'version' in versions: versions = versions['version'] for ver in versions: if (oper == '==' and fnmatch.fnmatch(ver, desired_version)) \ or salt.utils.versions.compare(ver1=ver, oper=oper, ver2=desired_version, cmp_func=cmp_func, ignore_epoch=ignore_epoch): return True return False def _find_unpurge_targets(desired, **kwargs): ''' Find packages which are marked to be purged but can't yet be removed because they are dependencies for 
other installed packages. These are the packages which will need to be 'unpurged' because they are part of pkg.installed states. This really just applies to Debian-based Linuxes. ''' return [ x for x in desired if x in __salt__['pkg.list_pkgs'](purge_desired=True, **kwargs) ] def _find_advisory_targets(name=None, advisory_ids=None, **kwargs): ''' Inspect the arguments to pkg.patch_installed and discover what advisory patches need to be installed. Return a dict of advisory patches to install. ''' cur_patches = __salt__['pkg.list_installed_patches'](**kwargs) if advisory_ids: to_download = advisory_ids else: to_download = [name] if cur_patches.get(name, {}): # Advisory patch already installed, no need to install it again return {'name': name, 'changes': {}, 'result': True, 'comment': 'Advisory patch {0} is already ' 'installed'.format(name)} # Find out which advisory patches will be targeted in the call to pkg.install targets = [] for patch_name in to_download: cver = cur_patches.get(patch_name, {}) # Advisory patch not yet installed, so add to targets if not cver: targets.append(patch_name) continue if not targets: # All specified packages are already downloaded msg = ('All specified advisory patches are already installed') return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_remove_targets(name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.removed and discover what packages need to be removed. Return a dict of packages to remove. ''' if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if pkgs: to_remove = _repack_pkgs(pkgs, normalize=normalize) if not to_remove: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. 
See ' 'minion log.'} else: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_remove = {_normalize_name(name): version} version_spec = False # Find out which packages will be targeted in the call to pkg.remove # Check current versions against specified versions targets = [] problems = [] for pkgname, pkgver in six.iteritems(to_remove): # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names origin = bool(re.search('/', pkgname)) if __grains__['os'] == 'FreeBSD' and origin: cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == pkgname] else: cver = cur_pkgs.get(pkgname, []) # Package not installed, no need to remove if not cver: continue # No version specified and pkg is installed elif __salt__['pkg_resource.version_clean'](pkgver) is None: targets.append(pkgname) continue version_spec = True try: if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch): targets.append(pkgname) else: log.debug( 'Current version (%s) did not match desired version ' 'specification (%s), will not remove', cver, pkgver ) except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already absent msg = 'All specified packages{0} are already absent'.format( ' (matching specified versions)' if version_spec else '' ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_install_targets(name=None, version=None, pkgs=None, sources=None, skip_suggestions=False, pkg_verify=False, normalize=True, ignore_epoch=False, reinstall=False, refresh=False, **kwargs): ''' Inspect the arguments to pkg.installed and discover what packages need to be installed. 
Return a dict of desired packages ''' was_refreshed = False if all((pkgs, sources)): return {'name': name, 'changes': {}, 'result': False, 'comment': 'Only one of "pkgs" and "sources" is permitted.'} # dict for packages that fail pkg.verify and their altered files altered_files = {} # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True if salt.utils.platform.is_windows(): # Windows requires a refresh to establish a pkg db if refresh=True, so # add it to the kwargs. 
kwargs['refresh'] = refresh resolve_capabilities = kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__ try: cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) cur_prov = resolve_capabilities and __salt__['pkg.list_provides'](**kwargs) or dict() except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': exc.strerror} if salt.utils.platform.is_windows() and kwargs.pop('refresh', False): # We already refreshed when we called pkg.list_pkgs was_refreshed = True refresh = False if any((pkgs, sources)): if pkgs: desired = _repack_pkgs(pkgs, normalize=normalize) elif sources: desired = __salt__['pkg_resource.pack_sources']( sources, normalize=normalize, ) if not desired: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted \'{0}\' parameter. See ' 'minion log.'.format('pkgs' if pkgs else 'sources')} to_unpurge = _find_unpurge_targets(desired, **kwargs) else: if salt.utils.platform.is_windows(): pkginfo = _get_package_info(name, saltenv=kwargs['saltenv']) if not pkginfo: return {'name': name, 'changes': {}, 'result': False, 'comment': 'Package {0} not found in the ' 'repository.'.format(name)} if version is None: version = _get_latest_pkg_version(pkginfo) if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) desired = {_normalize_name(name): version} else: desired = {name: version} to_unpurge = _find_unpurge_targets(desired, **kwargs) # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names origin = bool(re.search('/', name)) if __grains__['os'] == 'FreeBSD' and origin: cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == name] else: cver = cur_pkgs.get(name, []) if name not in to_unpurge: if version and version in cver \ and not reinstall \ and not pkg_verify: # The package is installed and is the correct version return {'name': name, 'changes': {}, 'result': True, 
'comment': 'Version {0} of package \'{1}\' is already ' 'installed'.format(version, name)} # if cver is not an empty string, the package is already installed elif cver and version is None \ and not reinstall \ and not pkg_verify: # The package is installed return {'name': name, 'changes': {}, 'result': True, 'comment': 'Package {0} is already ' 'installed'.format(name)} version_spec = False if not sources: # Check for alternate package names if strict processing is not # enforced. Takes extra time. Disable for improved performance if not skip_suggestions: # Perform platform-specific pre-flight checks not_installed = dict([ (name, version) for name, version in desired.items() if not (name in cur_pkgs and (version is None or _fulfills_version_string(cur_pkgs[name], version))) ]) if not_installed: try: problems = _preflight_check(not_installed, **kwargs) except CommandExecutionError: pass else: comments = [] if problems.get('no_suggest'): comments.append( 'The following package(s) were not found, and no ' 'possible matches were found in the package db: ' '{0}'.format( ', '.join(sorted(problems['no_suggest'])) ) ) if problems.get('suggest'): for pkgname, suggestions in \ six.iteritems(problems['suggest']): comments.append( 'Package \'{0}\' not found (possible matches: ' '{1})'.format(pkgname, ', '.join(suggestions)) ) if comments: if len(comments) > 1: comments.append('') return {'name': name, 'changes': {}, 'result': False, 'comment': '. '.join(comments).rstrip()} # Resolve the latest package version for any packages with "latest" in the # package version wants_latest = [] \ if sources \ else [x for x, y in six.iteritems(desired) if y == 'latest'] if wants_latest: resolved_latest = __salt__['pkg.latest_version'](*wants_latest, refresh=refresh, **kwargs) if len(wants_latest) == 1: resolved_latest = {wants_latest[0]: resolved_latest} if refresh: was_refreshed = True refresh = False # pkg.latest_version returns an empty string when the package is # up-to-date. 
So check the currently-installed packages. If found, the # resolved latest version will be the currently installed one from # cur_pkgs. If not found, then the package doesn't exist and the # resolved latest version will be None. for key in resolved_latest: if not resolved_latest[key]: if key in cur_pkgs: resolved_latest[key] = cur_pkgs[key][-1] else: resolved_latest[key] = None # Update the desired versions with the ones we resolved desired.update(resolved_latest) # Find out which packages will be targeted in the call to pkg.install targets = {} to_reinstall = {} problems = [] warnings = [] failed_verify = False for package_name, version_string in six.iteritems(desired): cver = cur_pkgs.get(package_name, []) if resolve_capabilities and not cver and package_name in cur_prov: cver = cur_pkgs.get(cur_prov.get(package_name)[0], []) # Package not yet installed, so add to targets if not cver: targets[package_name] = version_string continue if sources: if reinstall: to_reinstall[package_name] = version_string continue elif 'lowpkg.bin_pkg_info' not in __salt__: continue # Metadata parser is available, cache the file and derive the # package's name and version err = 'Unable to cache {0}: {1}' try: cached_path = __salt__['cp.cache_file'](version_string, saltenv=kwargs['saltenv']) except CommandExecutionError as exc: problems.append(err.format(version_string, exc)) continue if not cached_path: problems.append(err.format(version_string, 'file not found')) continue elif not os.path.exists(cached_path): problems.append('{0} does not exist on minion'.format(version_string)) continue source_info = __salt__['lowpkg.bin_pkg_info'](cached_path) if source_info is None: warnings.append('Failed to parse metadata for {0}'.format(version_string)) continue else: verstr = source_info['version'] else: verstr = version_string if reinstall: to_reinstall[package_name] = version_string continue if not __salt__['pkg_resource.check_extra_requirements'](package_name, version_string): 
targets[package_name] = version_string continue # No version specified and pkg is installed elif __salt__['pkg_resource.version_clean'](version_string) is None: if (not reinstall) and pkg_verify: try: verify_result = __salt__['pkg.verify']( package_name, ignore_types=ignore_types, verify_options=verify_options, **kwargs ) except (CommandExecutionError, SaltInvocationError) as exc: failed_verify = exc.strerror continue if verify_result: to_reinstall[package_name] = version_string altered_files[package_name] = verify_result continue version_fulfilled = False allow_updates = bool(not sources and kwargs.get('allow_updates')) try: version_fulfilled = _fulfills_version_string(cver, verstr, ignore_epoch=ignore_epoch, allow_updates=allow_updates) except CommandExecutionError as exc: problems.append(exc.strerror) continue # Compare desired version against installed version. version_spec = True if not version_fulfilled: if reinstall: to_reinstall[package_name] = version_string else: version_conditions = _parse_version_string(version_string) if pkg_verify and any(oper == '==' for oper, version in version_conditions): try: verify_result = __salt__['pkg.verify']( package_name, ignore_types=ignore_types, verify_options=verify_options, **kwargs ) except (CommandExecutionError, SaltInvocationError) as exc: failed_verify = exc.strerror continue if verify_result: to_reinstall[package_name] = version_string altered_files[package_name] = verify_result else: log.debug( 'Current version (%s) did not match desired version ' 'specification (%s), adding to installation targets', cver, version_string ) targets[package_name] = version_string if failed_verify: problems.append(failed_verify) if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not any((targets, to_unpurge, to_reinstall)): # All specified packages are installed msg = 'All specified packages are already installed{0}' msg = msg.format( ' and are at the desired version' if 
version_spec and not sources else '' ) ret = {'name': name, 'changes': {}, 'result': True, 'comment': msg} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret return (desired, targets, to_unpurge, to_reinstall, altered_files, warnings, was_refreshed) def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None): ''' Determine whether or not the installed packages match what was requested in the SLS file. ''' ok = [] failed = [] if not new_caps: new_caps = dict() for pkgname, pkgver in desired.items(): # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names. # Homebrew for Mac OSX does something similar with tap names # prefixing package names, separated with a slash. has_origin = '/' in pkgname if __grains__['os'] == 'FreeBSD' and has_origin: cver = [k for k, v in six.iteritems(new_pkgs) if v['origin'] == pkgname] elif __grains__['os'] == 'MacOS' and has_origin: cver = new_pkgs.get(pkgname, new_pkgs.get(pkgname.split('/')[-1])) elif __grains__['os'] == 'OpenBSD': cver = new_pkgs.get(pkgname.split('%')[0]) elif __grains__['os_family'] == 'Debian': cver = new_pkgs.get(pkgname.split('=')[0]) else: cver = new_pkgs.get(pkgname) if not cver and pkgname in new_caps: cver = new_pkgs.get(new_caps.get(pkgname)[0]) if not cver: failed.append(pkgname) continue elif pkgver == 'latest': ok.append(pkgname) continue elif not __salt__['pkg_resource.version_clean'](pkgver): ok.append(pkgname) continue elif pkgver.endswith("*") and cver[0].startswith(pkgver[:-1]): ok.append(pkgname) continue if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch): ok.append(pkgname) else: failed.append(pkgname) return ok, failed def _get_desired_pkg(name, desired): ''' Helper function that retrieves and nicely formats the desired pkg (and version if specified) so that helpful information can be printed in the comment for the state. 
''' if not desired[name] or desired[name].startswith(('<', '>', '=')): oper = '' else: oper = '=' return '{0}{1}{2}'.format(name, oper, '' if not desired[name] else desired[name]) def _preflight_check(desired, fromrepo, **kwargs): ''' Perform platform-specific checks on desired packages ''' if 'pkg.check_db' not in __salt__: return {} ret = {'suggest': {}, 'no_suggest': []} pkginfo = __salt__['pkg.check_db']( *list(desired.keys()), fromrepo=fromrepo, **kwargs ) for pkgname in pkginfo: if pkginfo[pkgname]['found'] is False: if pkginfo[pkgname]['suggestions']: ret['suggest'][pkgname] = pkginfo[pkgname]['suggestions'] else: ret['no_suggest'].append(pkgname) return ret def _nested_output(obj): ''' Serialize obj and format for output ''' nested.__opts__ = __opts__ ret = nested.output(obj).rstrip() return ret def _resolve_capabilities(pkgs, refresh=False, **kwargs): ''' Resolve capabilities in ``pkgs`` and exchange them with real package names, when the result is distinct. This feature can be turned on while setting the paramter ``resolve_capabilities`` to True. Return the input dictionary with replaced capability names and as second return value a bool which say if a refresh need to be run. In case of ``resolve_capabilities`` is False (disabled) or not supported by the implementation the input is returned unchanged. ''' if not pkgs or 'pkg.resolve_capabilities' not in __salt__: return pkgs, refresh ret = __salt__['pkg.resolve_capabilities'](pkgs, refresh=refresh, **kwargs) return ret, False def installed( name, version=None, refresh=None, fromrepo=None, skip_verify=False, skip_suggestions=False, pkgs=None, sources=None, allow_updates=False, pkg_verify=False, normalize=True, ignore_epoch=False, reinstall=False, update_holds=False, bypass_file=None, bypass_file_contains=None, **kwargs): ''' Ensure that the package is installed, and that it is the correct version (if specified). :param str name: The name of the package to be installed. 
This parameter is ignored if either "pkgs" or "sources" is used. Additionally, please note that this option can only be used to install packages from a software repository. To install a package file manually, use the "sources" option detailed below. :param str version: Install a specific version of a package. This option is ignored if "sources" is used. Currently, this option is supported for the following pkg providers: :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`pkgin <salt.modules.pkgin>`, :mod:`win_pkg <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`. The version number includes the release designation where applicable, to allow Salt to target a specific release of a given version. When in doubt, using the ``pkg.latest_version`` function for an uninstalled package will tell you the version available. .. code-block:: bash # salt myminion pkg.latest_version vim-enhanced myminion: 2:7.4.160-1.el7 .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the ``pkg.latest_version`` output above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. 
Also, while this function is not yet implemented for all pkg frontends, :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` will show all versions available in the various repositories for a given package, irrespective of whether or not it is installed. .. code-block:: bash # salt myminion pkg.list_repo_pkgs bash myminion: ---------- bash: - 4.2.46-21.el7_3 - 4.2.46-20.el7_2 This function was first added for :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` in 2014.1.0, and was expanded to :py:func:`Debian/Ubuntu <salt.modules.aptpkg.list_repo_pkgs>` and :py:func:`Arch Linux <salt.modules.pacman.list_repo_pkgs>`-based distros in the 2017.7.0 release. The version strings returned by either of these functions can be used as version specifiers in pkg states. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.installed: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 If the version given is the string ``latest``, the latest available package version will be installed à la ``pkg.latest``. **WILDCARD VERSIONS** As of the 2017.7.0 release, this state now supports wildcards in package versions for SUSE SLES/Leap/Tumbleweed, Debian/Ubuntu, RHEL/CentOS, Arch Linux, and their derivatives. Using wildcards can be useful for packages where the release name is built into the version in some way, such as for RHEL/CentOS which typically has version numbers like ``1.2.34-5.el7``. An example of the usage for this would be: .. code-block:: yaml mypkg: pkg.installed: - version: '1.2.34*' Keep in mind that using wildcard versions will result in a slower state run since Salt must gather the available versions of the specified packages and figure out which of them match the specified wildcard expression. :param bool refresh: This parameter controls whether or not the package repo database is updated prior to installing the requested package(s). 
If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before installing. If ``False``, the package database will *not* be refreshed before installing. If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - cache_valid_time: 300 - allow_updates: True - hold: False In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param str fromrepo: Specify a repository from which to install .. note:: Distros which use APT (Debian, Ubuntu, etc.) do not have a concept of repositories, in the same way as YUM-based distros do. When a source is added, it is assigned to a given release. Consider the following source configuration: .. code-block:: text deb http://ppa.launchpad.net/saltstack/salt/ubuntu precise main The packages provided by this source would be made available via the ``precise`` release, therefore ``fromrepo`` would need to be set to ``precise`` for Salt to install the package from this source. 
Having multiple sources in the same release may result in the default install candidate being newer than what is desired. If this is the case, the desired version must be specified using the ``version`` parameter. If the ``pkgs`` parameter is being used to install multiple packages in the same state, then instead of using ``version``, use the method of version specification described in the **Multiple Package Installation Options** section below. Running the shell command ``apt-cache policy pkgname`` on a minion can help elucidate the APT configuration and aid in properly configuring states: .. code-block:: bash root@saltmaster:~# salt ubuntu01 cmd.run 'apt-cache policy ffmpeg' ubuntu01: ffmpeg: Installed: (none) Candidate: 7:0.10.11-1~precise1 Version table: 7:0.10.11-1~precise1 0 500 http://ppa.launchpad.net/jon-severinsson/ffmpeg/ubuntu/ precise/main amd64 Packages 4:0.8.10-0ubuntu0.12.04.1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise-updates/main amd64 Packages 500 http://security.ubuntu.com/ubuntu/ precise-security/main amd64 Packages 4:0.8.1-0ubuntu1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise/main amd64 Packages The release is located directly after the source's URL. The actual release name is the part before the slash, so to install version **4:0.8.10-0ubuntu0.12.04.1** either ``precise-updates`` or ``precise-security`` could be used for the ``fromrepo`` value. :param bool skip_verify: Skip the GPG verification check for the package to be installed :param bool skip_suggestions: Force strict package naming. Disables lookup of package alternatives. .. versionadded:: 2014.1.1 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 :param bool allow_updates: Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). 
This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - allow_updates: True - hold: False :param bool pkg_verify: .. versionadded:: 2014.7.0 For requested packages that are already installed and would not be targeted for upgrade or downgrade, use pkg.verify to determine if any of the files installed by the package have been altered. If files have been altered, the reinstall option of pkg.install is used to force a reinstall. Types to ignore can be passed to pkg.verify. Additionally, ``verify_options`` can be used to modify further the behavior of pkg.verify. See examples below. Currently, this option is supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>`. Examples: .. code-block:: yaml httpd: pkg.installed: - version: 2.2.15-30.el6.centos - pkg_verify: True .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc - verify_options: - nodeps - nofiledigest :param list ignore_types: List of types to ignore when verifying the package .. versionadded:: 2014.7.0 :param list verify_options: List of additional options to pass when verifying the package. These options will be added to the ``rpm -V`` command, prepended with ``--`` (for example, when ``nodeps`` is passed in this option, ``rpm -V`` will be run with ``--nodeps``). .. versionadded:: 2016.11.0 :param bool normalize: Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml gpfs.gplbin-2.6.32-279.31.1.el6.x86_64: pkg.installed: - normalize: False :param bool ignore_epoch: When a package version contains a non-zero epoch (e.g. ``1:3.14.159-2.el7``), and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.installed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the package would be installed, but the state would report as failed because the actual installed version would be ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 | **MULTIPLE PACKAGE INSTALLATION OPTIONS: (not supported in pkgng)** :param list pkgs: A list of packages to install from a software repository. All packages listed under ``pkgs`` will be installed via a single command. .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar - baz - hold: True ``NOTE:`` For :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`winrepo <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`, version numbers can be specified in the ``pkgs`` argument. For example: ..
code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz Additionally, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`zypper <salt.modules.zypper>`, :mod:`yum/dnf <salt.modules.yumpkg>`, and :mod:`apt <salt.modules.aptpkg>` support the ``<``, ``<=``, ``>=``, and ``>`` operators for more control over what versions will be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: '>=1.2.3-4' - baz ``NOTE:`` When using comparison operators, the expression must be enclosed in quotes to avoid a YAML render error. With :mod:`ebuild <salt.modules.ebuild>` is also possible to specify a use flag list and/or if the given packages should be in package.accept_keywords file and/or the overlay from which you want the package to be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo: '~' - bar: '~>=1.2:slot::overlay[use,-otheruse]' - baz :param list sources: A list of packages to install, along with the source URI or local path from which to install each package. In the example below, ``foo``, ``bar``, ``baz``, etc. refer to the name of the package, as it would appear in the output of the ``pkg.version`` or ``pkg.list_pkgs`` salt CLI commands. .. code-block:: yaml mypkgs: pkg.installed: - sources: - foo: salt://rpms/foo.rpm - bar: http://somesite.org/bar.rpm - baz: ftp://someothersite.org/baz.rpm - qux: /minion/path/to/qux.rpm **PLATFORM-SPECIFIC ARGUMENTS** These are specific to each OS. If it does not apply to the execution module for your OS, it is ignored. :param bool hold: Force the package to be held at the current installed version. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2014.7.0 :param bool update_holds: If ``True``, and this function would update the package version, any packages which are being held will be temporarily unheld so that they can be updated. 
Otherwise, if this function attempts to update a held package, the held package(s) will be skipped and the state will fail. By default, this parameter is set to ``False``. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2016.11.0 :param list names: A list of packages to install from a software repository. Each package will be installed individually by the package manager. .. warning:: Unlike ``pkgs``, the ``names`` parameter cannot specify a version. In addition, it makes a separate call to the package management frontend to install each package, whereas ``pkgs`` makes just a single call. It is therefore recommended to use ``pkgs`` instead of ``names`` to install multiple packages, both for the additional features and the performance improvement that it brings. :param bool install_recommends: Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - install_recommends: False :param bool only_upgrade: Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. :param bool report_reboot_exit_codes: If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. 
code-block:: yaml ms vcpp installed: pkg.installed: - name: ms-vcpp - version: 10.0.40219 - report_reboot_exit_codes: False :param str bypass_file: If you wish to bypass the full package validation process, you can specify a file related to the installed package as a way to validate the package has already been installed. A good example would be a config file that is deployed with the package. Another bypass_file could be ``/run/salt-minion.pid``. .. code-block:: yaml install_ntp: pkg.installed: - name: ntp - bypass_file: /etc/ntp.conf The use case for this feature is when running salt at significant scale. Each state that has a requisite for a ``pkg.installed`` will have salt querying the package manager of the system. Compared to simple diff checks, querying the package manager is a lengthy process. This feature is an attempt to reduce the run time of states. If only a config change is being made but you wish to keep all of the self resolving requisites this bypasses the lengthy cost of the package manager. The assumption is that if this file is present, the package should already be installed. :param str bypass_file_contains: This option can only be used in conjunction with the ``bypass_file`` option. It is to provide a second layer of validation before bypassing the ``pkg.installed`` process. .. code-block:: yaml install_ntp: pkg.installed: - name: ntp - bypass_file: /etc/ntp.conf - bypass_file_contains: version-20181218 This will have salt check to see if the file contains the specified string. If the value is found, the ``pkg.installed`` process will be bypassed under the assumption that two pieces of validation have passed and the package is already installed. .. warning:: Do not try and use ``{{ salt['pkg.version']('ntp') }}`` in a jinja template as part of your bypass_file_contains match. This will trigger a ``pkg.version`` lookup with the package manager and negate any time saved by trying to use the bypass feature.
:return: A dictionary containing the state of the software installation :rtype dict: .. note:: The ``pkg.installed`` state supports the usage of ``reload_modules``. This functionality allows you to force Salt to reload all modules. In many cases, Salt is clever enough to transparently reload the modules. For example, if you install a package, Salt reloads modules because some other module or state might require the package which was installed. However, there are some edge cases where this may not be the case, which is what ``reload_modules`` is meant to resolve. You should only use ``reload_modules`` if your ``pkg.installed`` does some sort of installation where if you do not reload the modules future items in your state which rely on the software being installed will fail. Please see the :ref:`Reloading Modules <reloading-modules>` documentation for more information. ''' if not pkgs and isinstance(pkgs, list): return {'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided'} # If just a name (and optionally a version) is passed, just pack them into # the pkgs argument. 
if name and not any((pkgs, sources)): if version: pkgs = [{name: version}] version = None else: pkgs = [name] kwargs['saltenv'] = __env__ refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if bypass_file is not None and bypass_file_contains is not None: if os.path.isfile(bypass_file): with salt.utils.fopen(bypass_file) as bypass_file_open: if bypass_file_contains in bypass_file_open.read(): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as {} was present in {}'.format(bypass_file_contains, bypass_file)} if bypass_file is not None and bypass_file_contains is None: if os.path.isfile(bypass_file): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as bypass_file {} was present'.format(bypass_file)} # check if capabilities should be checked and modify the requested packages # accordingly. if pkgs: pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) if not isinstance(pkg_verify, list): pkg_verify = pkg_verify is True if (pkg_verify or isinstance(pkg_verify, list)) \ and 'pkg.verify' not in __salt__: return {'name': name, 'changes': {}, 'result': False, 'comment': 'pkg.verify not implemented'} if not isinstance(version, six.string_types) and version is not None: version = six.text_type(version) kwargs['allow_updates'] = allow_updates result = _find_install_targets(name, version, pkgs, sources, fromrepo=fromrepo, skip_suggestions=skip_suggestions, pkg_verify=pkg_verify, normalize=normalize, ignore_epoch=ignore_epoch, reinstall=reinstall, refresh=refresh, **kwargs) try: (desired, targets, to_unpurge, to_reinstall, altered_files, warnings, was_refreshed) = result if was_refreshed: refresh = False except ValueError: # _find_install_targets() found no targets or encountered an error # check that the hold function is available if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( 
name=name, pkgs=pkgs, sources=sources ) except (CommandExecutionError, SaltInvocationError) as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': six.text_type(exc)} if 'result' in hold_ret and not hold_ret['result']: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] for i in modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] result['changes'][i['name']] = i['changes'] for i in not_modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] for i in failed_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] return result if to_unpurge and 'lowpkg.unpurge' not in __salt__: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'lowpkg.unpurge not implemented'} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret # Remove any targets not returned by _find_install_targets if pkgs: pkgs = [dict([(x, y)]) for x, y in six.iteritems(targets)] pkgs.extend([dict([(x, y)]) for x, y in six.iteritems(to_reinstall)]) elif sources: oldsources = sources sources = [x for x in oldsources if next(iter(list(x.keys()))) in targets] sources.extend([x for x in oldsources if next(iter(list(x.keys()))) in to_reinstall]) comment = [] if __opts__['test']: if targets: if sources: summary = ', '.join(targets) else: summary = ', '.join([_get_desired_pkg(x, targets) for x in targets]) comment.append('The following packages would be ' 'installed/updated: {0}'.format(summary)) if to_unpurge: comment.append( 'The following packages would have their selection 
status ' 'changed from \'purge\' to \'install\': {0}' .format(', '.join(to_unpurge)) ) if to_reinstall: # Add a comment for each package in to_reinstall with its # pkg.verify output if reinstall: reinstall_targets = [] for reinstall_pkg in to_reinstall: if sources: reinstall_targets.append(reinstall_pkg) else: reinstall_targets.append( _get_desired_pkg(reinstall_pkg, to_reinstall) ) msg = 'The following packages would be reinstalled: ' msg += ', '.join(reinstall_targets) comment.append(msg) else: for reinstall_pkg in to_reinstall: if sources: pkgstr = reinstall_pkg else: pkgstr = _get_desired_pkg(reinstall_pkg, to_reinstall) comment.append( 'Package \'{0}\' would be reinstalled because the ' 'following files have been altered:'.format(pkgstr) ) comment.append( _nested_output(altered_files[reinstall_pkg]) ) ret = {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret changes = {'installed': {}} modified_hold = None not_modified_hold = None failed_hold = None if targets or to_reinstall: try: pkg_ret = __salt__['pkg.install'](name=None, refresh=refresh, version=version, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=pkgs, sources=sources, reinstall=bool(to_reinstall), normalize=normalize, update_holds=update_holds, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while installing ' 'package(s): {0}'.format(exc)) if warnings: ret.setdefault('warnings', []).extend(warnings) return ret if refresh: refresh = False if isinstance(pkg_ret, dict): changes['installed'].update(pkg_ret) elif isinstance(pkg_ret, six.string_types): comment.append(pkg_ret) # Code below will be looking for a dictionary. If this is a string # it means that there was an exception raised and that no packages # changed, so now that we have added this error to the comments we # set this to an empty dictionary so that the code below which # checks reinstall targets works. pkg_ret = {} if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( name=name, pkgs=desired ) except (CommandExecutionError, SaltInvocationError) as exc: comment.append(six.text_type(exc)) ret = {'name': name, 'changes': changes, 'result': False, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: if 'result' in hold_ret and not hold_ret['result']: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] if to_unpurge: changes['purge_desired'] = __salt__['lowpkg.unpurge'](*to_unpurge) # Analyze pkg.install results for packages in targets if sources: modified = [x for x in changes['installed'] if x in targets] not_modified = [x for x in desired if x not in targets 
and x not in to_reinstall] failed = [x for x in targets if x not in modified] else: if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True new_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__: new_caps = __salt__['pkg.list_provides'](**kwargs) else: new_caps = {} ok, failed = _verify_install(desired, new_pkgs, ignore_epoch=ignore_epoch, new_caps=new_caps) modified = [x for x in ok if x in targets] not_modified = [x for x in ok if x not in targets and x not in to_reinstall] failed = [x for x in failed if x in targets] # If there was nothing unpurged, just set the changes dict to the contents # of changes['installed']. if not changes.get('purge_desired'): changes = changes['installed'] if modified: if sources: summary = ', '.join(modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in modified]) if len(summary) < 20: comment.append('The following packages were installed/updated: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} installed/updated.'.format( len(modified), 's' if len(modified) > 1 else '', 'were' if len(modified) > 1 else 'was' ) ) if modified_hold: for i in modified_hold: change_name = i['name'] if change_name in changes: comment.append(i['comment']) if changes[change_name]['new']: changes[change_name]['new'] += '\n' changes[change_name]['new'] += '{0}'.format(i['changes']['new']) if changes[change_name]['old']: changes[change_name]['old'] += '\n' changes[change_name]['old'] += '{0}'.format(i['changes']['old']) else: comment.append(i['comment']) changes[change_name] = {} changes[change_name]['new'] = '{0}'.format(i['changes']['new']) # Any requested packages that were not targeted for install or reinstall if not_modified: if sources: summary = ', '.join(not_modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in not_modified]) if len(not_modified) <= 20: comment.append('The following 
packages were already installed: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} already installed'.format( len(not_modified), 's' if len(not_modified) > 1 else '', 'were' if len(not_modified) > 1 else 'was' ) ) if not_modified_hold: for i in not_modified_hold: comment.append(i['comment']) result = True if failed: if sources: summary = ', '.join(failed) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in failed]) comment.insert(0, 'The following packages failed to ' 'install/update: {0}'.format(summary)) result = False if failed_hold: for i in failed_hold: comment.append(i['comment']) result = False # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] # Rerun pkg.verify for packages in to_reinstall to determine failed modified = [] failed = [] for reinstall_pkg in to_reinstall: if reinstall: if reinstall_pkg in pkg_ret: modified.append(reinstall_pkg) else: failed.append(reinstall_pkg) elif pkg_verify: # No need to wrap this in a try/except because we would already # have caught invalid arguments earlier. 
verify_result = __salt__['pkg.verify'](reinstall_pkg, ignore_types=ignore_types, verify_options=verify_options, **kwargs) if verify_result: failed.append(reinstall_pkg) altered_files[reinstall_pkg] = verify_result else: modified.append(reinstall_pkg) if modified: # Add a comment for each package in modified with its pkg.verify output for modified_pkg in modified: if sources: pkgstr = modified_pkg else: pkgstr = _get_desired_pkg(modified_pkg, desired) msg = 'Package {0} was reinstalled.'.format(pkgstr) if modified_pkg in altered_files: msg += ' The following files were remediated:' comment.append(msg) comment.append(_nested_output(altered_files[modified_pkg])) else: comment.append(msg) if failed: # Add a comment for each package in failed with its pkg.verify output for failed_pkg in failed: if sources: pkgstr = failed_pkg else: pkgstr = _get_desired_pkg(failed_pkg, desired) msg = ('Reinstall was not successful for package {0}.' .format(pkgstr)) if failed_pkg in altered_files: msg += ' The following files could not be remediated:' comment.append(msg) comment.append(_nested_output(altered_files[failed_pkg])) else: comment.append(msg) result = False ret = {'name': name, 'changes': changes, 'result': result, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret def downloaded(name, version=None, pkgs=None, fromrepo=None, ignore_epoch=None, **kwargs): ''' .. versionadded:: 2017.7.0 Ensure that the package is downloaded, and that it is the correct version (if specified). Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` :param str name: The name of the package to be downloaded. This parameter is ignored if either "pkgs" is used. Additionally, please note that this option can only be used to download packages from a software repository. :param str version: Download a specific version of a package. .. 
important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.downloaded: - version: 2:7.4.160-1.el7 An **ignore_epoch** argument has been added to which causes the epoch to be disregarded when the state checks to see if the desired version was installed. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.downloaded: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: yaml zsh: pkg.downloaded: - version: 5.0.5-4.63 - fromrepo: "myrepository" ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} if 'pkg.list_downloaded' not in __salt__: ret['result'] = False ret['comment'] = 'The pkg.downloaded state is not available on ' \ 'this platform' return ret if not pkgs and isinstance(pkgs, list): ret['result'] = True ret['comment'] = 'No packages to download provided' return ret # If just a name (and optionally a version) is passed, just pack them into # the pkgs argument. if name and not pkgs: if version: pkgs = [{name: version}] version = None else: pkgs = [name] # It doesn't make sense here to received 'downloadonly' as kwargs # as we're explicitly passing 'downloadonly=True' to execution module. 
if 'downloadonly' in kwargs: del kwargs['downloadonly'] pkgs, _refresh = _resolve_capabilities(pkgs, **kwargs) # Only downloading not yet downloaded packages targets = _find_download_targets(name, version, pkgs, fromrepo=fromrepo, ignore_epoch=ignore_epoch, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, dict): ret['result'] = False ret['comment'] = 'An error was encountered while checking targets: ' \ '{0}'.format(targets) return ret if __opts__['test']: summary = ', '.join(targets) ret['comment'] = 'The following packages would be ' \ 'downloaded: {0}'.format(summary) return ret try: pkg_ret = __salt__['pkg.install'](name=name, pkgs=pkgs, version=version, downloadonly=True, fromrepo=fromrepo, ignore_epoch=ignore_epoch, **kwargs) ret['result'] = True ret['changes'].update(pkg_ret) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = 'An error was encountered while downloading ' \ 'package(s): {0}'.format(exc) return ret new_pkgs = __salt__['pkg.list_downloaded'](**kwargs) ok, failed = _verify_install(targets, new_pkgs, ignore_epoch=ignore_epoch) if failed: summary = ', '.join([_get_desired_pkg(x, targets) for x in failed]) ret['result'] = False ret['comment'] = 'The following packages failed to ' \ 'download: {0}'.format(summary) if not ret['changes'] and not ret['comment']: ret['result'] = True ret['comment'] = 'Packages are already downloaded: ' \ '{0}'.format(', '.join(targets)) return ret def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs): ''' .. versionadded:: 2017.7.0 Ensure that packages related to certain advisory ids are installed. 
Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` CLI Example: .. code-block:: yaml issue-foo-fixed: pkg.patch_installed: - advisory_ids: - SUSE-SLE-SERVER-12-SP2-2017-185 - SUSE-SLE-SERVER-12-SP2-2017-150 - SUSE-SLE-SERVER-12-SP2-2017-120 ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} if 'pkg.list_patches' not in __salt__: ret['result'] = False ret['comment'] = 'The pkg.patch_installed state is not available on ' \ 'this platform' return ret if not advisory_ids and isinstance(advisory_ids, list): ret['result'] = True ret['comment'] = 'No advisory ids provided' return ret # Only downloading not yet downloaded packages targets = _find_advisory_targets(name, advisory_ids, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, list): ret['result'] = False ret['comment'] = 'An error was encountered while checking targets: ' \ '{0}'.format(targets) return ret if __opts__['test']: summary = ', '.join(targets) ret['comment'] = 'The following advisory patches would be ' \ 'downloaded: {0}'.format(summary) return ret try: pkg_ret = __salt__['pkg.install'](name=name, advisory_ids=advisory_ids, downloadonly=downloadonly, **kwargs) ret['result'] = True ret['changes'].update(pkg_ret) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while downloading ' 'package(s): {0}'.format(exc)) return ret if not ret['changes'] and not ret['comment']: status = 'downloaded' if downloadonly else 'installed' ret['result'] = True ret['comment'] = ('Advisory patch is not needed or related packages ' 'are already {0}'.format(status)) return ret def patch_downloaded(name, advisory_ids=None, **kwargs): ''' .. versionadded:: 2017.7.0 Ensure that packages related to certain advisory ids are downloaded. Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` CLI Example: .. code-block:: yaml preparing-to-fix-issues: pkg.patch_downloaded: - advisory_ids: - SUSE-SLE-SERVER-12-SP2-2017-185 - SUSE-SLE-SERVER-12-SP2-2017-150 - SUSE-SLE-SERVER-12-SP2-2017-120 ''' if 'pkg.list_patches' not in __salt__: return {'name': name, 'result': False, 'changes': {}, 'comment': 'The pkg.patch_downloaded state is not available on ' 'this platform'} # It doesn't make sense here to received 'downloadonly' as kwargs # as we're explicitly passing 'downloadonly=True' to execution module. if 'downloadonly' in kwargs: del kwargs['downloadonly'] return patch_installed(name=name, advisory_ids=advisory_ids, downloadonly=True, **kwargs) def latest( name, refresh=None, fromrepo=None, skip_verify=False, pkgs=None, watch_flags=True, **kwargs): ''' Ensure that the named package is installed and the latest available package. If the package can be updated, this state function will update the package. Generally it is better for the :mod:`installed <salt.states.pkg.installed>` function to be used, as :mod:`latest <salt.states.pkg.latest>` will update the package whenever a new package is available. name The name of the package to maintain at the latest available version. This parameter is ignored if "pkgs" is used. 
def latest(
        name,
        refresh=None,
        fromrepo=None,
        skip_verify=False,
        pkgs=None,
        watch_flags=True,
        **kwargs):
    '''
    Ensure that the named package is installed and is the latest available
    version. If an upgrade is available it will be applied; prefer
    :mod:`pkg.installed <salt.states.pkg.installed>` when an unconditional
    upgrade is not desired.

    name
        The package to keep at the newest version. Ignored when ``pkgs`` is
        used.

    refresh
        Whether to refresh the package database before checking versions.
        If unset, Salt refreshes at most once per state run.

    fromrepo
        Specify a repository from which to install.

    skip_verify
        Skip the GPG verification check for the package to be installed.

    pkgs
        A list of packages to maintain at the latest available version.

    watch_flags
        On Gentoo, also reinstall a current package whose USE flags changed.
    '''
    refresh = salt.utils.pkg.check_refresh(__opts__, refresh)

    if kwargs.get('sources'):
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'The "sources" parameter is not supported.'}
    elif pkgs:
        desired_pkgs = list(_repack_pkgs(pkgs).keys())
        if not desired_pkgs:
            # Badly-formatted SLS
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'Invalidly formatted "pkgs" parameter. See '
                               'minion log.'}
    else:
        if not pkgs and isinstance(pkgs, list):
            return {'name': name,
                    'changes': {},
                    'result': True,
                    'comment': 'No packages to install provided'}
        else:
            desired_pkgs = [name]

    kwargs['saltenv'] = __env__

    # Rewrite the requested package names if capability resolution is
    # enabled (e.g. "provides" aliases).
    desired_pkgs, refresh = _resolve_capabilities(desired_pkgs,
                                                  refresh=refresh,
                                                  **kwargs)
    try:
        avail = __salt__['pkg.latest_version'](*desired_pkgs,
                                               fromrepo=fromrepo,
                                               refresh=refresh,
                                               **kwargs)
    except CommandExecutionError as exc:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'An error was encountered while checking the '
                           'newest available version of package(s): {0}'
                           .format(exc)}

    try:
        cur = __salt__['pkg.version'](*desired_pkgs, **kwargs)
    except CommandExecutionError as exc:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': exc.strerror}

    # Single-package queries return bare strings; normalize to dicts so the
    # loop below can treat both cases uniformly.
    if isinstance(cur, six.string_types):
        cur = {desired_pkgs[0]: cur}
    if isinstance(avail, six.string_types):
        avail = {desired_pkgs[0]: avail}

    targets = {}
    problems = []
    for pkg in desired_pkgs:
        if not avail.get(pkg):
            # Either up-to-date, or the provider knows nothing about it.
            if not cur.get(pkg):
                msg = 'No information found for \'{0}\'.'.format(pkg)
                log.error(msg)
                problems.append(msg)
            elif watch_flags \
                    and __grains__.get('os_family') == 'Gentoo' \
                    and __salt__['portage_config.is_changed_uses'](pkg):
                # USE flags changed, so target a rebuild even though the
                # installed version is current.
                targets[pkg] = cur[pkg]
        else:
            # Not installed, or an upgrade is available.
            targets[pkg] = avail[pkg]

    if problems:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': ' '.join(problems)}

    if targets:
        # Work out which requested packages are already current.
        if not pkgs:
            # A single-name invocation that reached the install step cannot
            # have had any up-to-date packages.
            up_to_date = []
        else:
            up_to_date = [x for x in pkgs if x not in targets]

        if __opts__['test']:
            comments = []
            comments.append(
                'The following packages would be installed/upgraded: ' +
                ', '.join(sorted(targets))
            )
            if up_to_date:
                up_to_date_count = len(up_to_date)
                if up_to_date_count <= 10:
                    comments.append(
                        'The following packages are already up-to-date: ' +
                        ', '.join(
                            ['{0} ({1})'.format(x, cur[x])
                             for x in sorted(up_to_date)]
                        )
                    )
                else:
                    comments.append(
                        '{0} packages are already up-to-date'
                        .format(up_to_date_count)
                    )
            return {'name': name,
                    'changes': {},
                    'result': None,
                    'comment': '\n'.join(comments)}

        if salt.utils.platform.is_windows():
            # On Windows, pkg.install without a version only ensures the
            # package is present; pass explicit versions to force upgrades.
            targeted_pkgs = [{x: targets[x]} for x in targets]
        else:
            targeted_pkgs = list(targets)

        # No refresh here: pkg.latest_version above already performed one
        # when it was necessary.
        try:
            changes = __salt__['pkg.install'](name=None,
                                              refresh=False,
                                              fromrepo=fromrepo,
                                              skip_verify=skip_verify,
                                              pkgs=targeted_pkgs,
                                              **kwargs)
        except CommandExecutionError as exc:
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'An error was encountered while installing '
                               'package(s): {0}'.format(exc)}

        if changes:
            # Split the targets into failed and successful updates.
            failed = [x for x in targets
                      if not changes.get(x) or
                      changes[x].get('new') != targets[x] and
                      targets[x] != 'latest']
            successful = [x for x in targets if x not in failed]

            comments = []
            if failed:
                msg = 'The following packages failed to update: ' \
                      '{0}'.format(', '.join(sorted(failed)))
                comments.append(msg)
            if successful:
                msg = 'The following packages were successfully ' \
                      'installed/upgraded: ' \
                      '{0}'.format(', '.join(sorted(successful)))
                comments.append(msg)
            if up_to_date:
                if len(up_to_date) <= 10:
                    msg = 'The following packages were already up-to-date: ' \
                          '{0}'.format(', '.join(sorted(up_to_date)))
                else:
                    msg = '{0} packages were already up-to-date '.format(
                        len(up_to_date))
                comments.append(msg)

            return {'name': name,
                    'changes': changes,
                    'result': False if failed else True,
                    'comment': ' '.join(comments)}
        else:
            if len(targets) > 10:
                comment = ('{0} targeted packages failed to update. '
                           'See debug log for details.'.format(len(targets)))
            elif len(targets) > 1:
                comment = ('The following targeted packages failed to update. '
                           'See debug log for details: ({0}).'
                           .format(', '.join(sorted(targets))))
            else:
                comment = 'Package {0} failed to ' \
                          'update.'.format(next(iter(list(targets.keys()))))
            if up_to_date:
                if len(up_to_date) <= 10:
                    comment += ' The following packages were already ' \
                               'up-to-date: ' \
                               '{0}'.format(', '.join(sorted(up_to_date)))
                else:
                    comment += '{0} packages were already ' \
                               'up-to-date'.format(len(up_to_date))

            return {'name': name,
                    'changes': changes,
                    'result': False,
                    'comment': comment}
    else:
        if len(desired_pkgs) > 10:
            comment = 'All {0} packages are up-to-date.'.format(
                len(desired_pkgs))
        elif len(desired_pkgs) > 1:
            comment = 'All packages are up-to-date ' \
                      '({0}).'.format(', '.join(sorted(desired_pkgs)))
        else:
            comment = 'Package {0} is already ' \
                      'up-to-date'.format(desired_pkgs[0])

        return {'name': name,
                'changes': {},
                'result': True,
                'comment': comment}
def _uninstall(
        action='remove',
        name=None,
        version=None,
        pkgs=None,
        normalize=True,
        ignore_epoch=False,
        **kwargs):
    '''
    Shared implementation behind the ``removed`` and ``purged`` states.
    ``action`` selects which pkg execution function ('remove' or 'purge')
    is invoked.
    '''
    if action not in ('remove', 'purge'):
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'Invalid action \'{0}\'. '
                           'This is probably a bug.'.format(action)}

    try:
        pkg_params = __salt__['pkg_resource.parse_targets'](
            name, pkgs, normalize=normalize)[0]
    except MinionError as exc:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'An error was encountered while parsing targets: '
                           '{0}'.format(exc)}

    targets = _find_remove_targets(name, version, pkgs, normalize,
                                   ignore_epoch=ignore_epoch, **kwargs)
    if isinstance(targets, dict) and 'result' in targets:
        # _find_remove_targets already produced a complete state return.
        return targets
    elif not isinstance(targets, list):
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'An error was encountered while checking targets: '
                           '{0}'.format(targets)}

    if action == 'purge':
        # Also target packages that were removed but still have leftover
        # config files on disk.
        old_removed = __salt__['pkg.list_pkgs'](versions_as_list=True,
                                                removed=True,
                                                **kwargs)
        targets.extend([x for x in pkg_params if x in old_removed])
    targets.sort()

    if not targets:
        return {'name': name,
                'changes': {},
                'result': True,
                'comment': 'None of the targeted packages are installed'
                           '{0}'.format(' or partially installed'
                                        if action == 'purge' else '')}

    if __opts__['test']:
        return {'name': name,
                'changes': {},
                'result': None,
                'comment': 'The following packages will be {0}d: '
                           '{1}.'.format(action, ', '.join(targets))}

    changes = __salt__['pkg.{0}'.format(action)](name,
                                                 pkgs=pkgs,
                                                 version=version,
                                                 **kwargs)
    new = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)

    failed = []
    for pkgname in pkg_params:
        if __grains__['os_family'] in ['Suse', 'RedHat']:
            # Check if the package version set to be removed is actually
            # gone from the installed list.
            if pkgname in new and not pkg_params[pkgname]:
                failed.append(pkgname)
            elif pkgname in new and pkg_params[pkgname] in new[pkgname]:
                failed.append(pkgname + "-" + pkg_params[pkgname])
        elif pkgname in new:
            failed.append(pkgname)

    if action == 'purge':
        new_removed = __salt__['pkg.list_pkgs'](versions_as_list=True,
                                                removed=True,
                                                **kwargs)
        failed.extend([x for x in pkg_params if x in new_removed])
    failed.sort()

    if failed:
        return {'name': name,
                'changes': changes,
                'result': False,
                'comment': 'The following packages failed to {0}: '
                           '{1}.'.format(action, ', '.join(failed))}

    comments = []
    not_installed = sorted([x for x in pkg_params if x not in targets])
    if not_installed:
        comments.append('The following packages were not installed: '
                        '{0}'.format(', '.join(not_installed)))
        comments.append('The following packages were {0}d: '
                        '{1}.'.format(action, ', '.join(targets)))
    else:
        comments.append('All targeted packages were {0}d.'.format(action))

    return {'name': name,
            'changes': changes,
            'result': True,
            'comment': ' '.join(comments)}
def removed(name,
            version=None,
            pkgs=None,
            normalize=True,
            ignore_epoch=False,
            **kwargs):
    '''
    Verify that a package is not installed, calling ``pkg.remove`` if
    necessary to remove the package.

    name
        The name of the package to be removed.

    version
        The version of the package that should be removed. Don't do anything
        if the package is installed with an unmatching version. On yum/dnf
        platforms the epoch must be included unless ``ignore_epoch`` is set.

    normalize : True
        Normalize the package name by removing the architecture, if it
        differs from the architecture of the operating system.

        .. versionadded:: 2015.8.0

    ignore_epoch : False
        Set to ``True`` to ignore the epoch when comparing versions.

        .. versionadded:: 2015.8.9

    Multiple Package Options:

    pkgs
        A list of packages to remove. Must be passed as a python list. The
        ``name`` parameter will be ignored if this option is passed. It
        accepts version numbers as well.

        .. versionadded:: 0.16.0
    '''
    kwargs['saltenv'] = __env__
    try:
        return _uninstall(action='remove', name=name, version=version,
                          pkgs=pkgs, normalize=normalize,
                          ignore_epoch=ignore_epoch, **kwargs)
    except CommandExecutionError as exc:
        # Build the state return from the exception payload when present.
        if exc.info:
            changes = exc.info.get('changes', {})
            comment = exc.strerror_without_changes
        else:
            changes = {}
            comment = ('An error was encountered while removing '
                       'package(s): {0}'.format(exc))
        return {'name': name,
                'result': False,
                'changes': changes,
                'comment': comment}
def purged(name,
           version=None,
           pkgs=None,
           normalize=True,
           ignore_epoch=False,
           **kwargs):
    '''
    Verify that a package is not installed, calling ``pkg.purge`` if necessary
    to purge the package. All configuration files are also removed.

    name
        The name of the package to be purged.

    version
        The version of the package that should be removed. Don't do anything
        if the package is installed with an unmatching version. On yum/dnf
        platforms the epoch must be included unless ``ignore_epoch`` is set.

    normalize : True
        Normalize the package name by removing the architecture, if it
        differs from the architecture of the operating system.

        .. versionadded:: 2015.8.0

    ignore_epoch : False
        Set to ``True`` to ignore the epoch when comparing versions.

        .. versionadded:: 2015.8.9

    Multiple Package Options:

    pkgs
        A list of packages to purge. Must be passed as a python list. The
        ``name`` parameter will be ignored if this option is passed. It
        accepts version numbers as well.

        .. versionadded:: 0.16.0
    '''
    kwargs['saltenv'] = __env__
    try:
        return _uninstall(action='purge', name=name, version=version,
                          pkgs=pkgs, normalize=normalize,
                          ignore_epoch=ignore_epoch, **kwargs)
    except CommandExecutionError as exc:
        # Build the state return from the exception payload when present.
        if exc.info:
            changes = exc.info.get('changes', {})
            comment = exc.strerror_without_changes
        else:
            changes = {}
            comment = ('An error was encountered while purging '
                       'package(s): {0}'.format(exc))
        return {'name': name,
                'result': False,
                'changes': changes,
                'comment': comment}
def uptodate(name, refresh=False, pkgs=None, **kwargs):
    '''
    .. versionadded:: 2014.7.0

    .. versionchanged:: 2018.3.0

        Added support for the ``pkgin`` provider.

    Verify that the system is completely up to date.

    name
        The name has no functional value and is only used as a tracking
        reference.

    refresh
        refresh the package database before checking for new upgrades

    pkgs
        list of packages to upgrade

    kwargs
        Any keyword arguments to pass through to ``pkg.upgrade``.

        .. versionadded:: 2015.5.0
    '''
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': 'Failed to update'}

    if 'pkg.list_upgrades' not in __salt__:
        ret['comment'] = 'State pkg.uptodate is not available'
        return ret

    # emerge --update doesn't appear to support repo notation
    if 'fromrepo' in kwargs and __grains__['os_family'] == 'Gentoo':
        ret['comment'] = '\'fromrepo\' argument not supported on this platform'
        return ret

    if isinstance(refresh, bool):
        pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs)
        try:
            pending = __salt__['pkg.list_upgrades'](refresh=refresh, **kwargs)
            # Map each upgradable package to its old/new version pair.
            expected = {pkgname: {'new': pkgver,
                                  'old': __salt__['pkg.version'](pkgname,
                                                                 **kwargs)}
                        for pkgname, pkgver in six.iteritems(pending)}
            if isinstance(pkgs, list):
                # Restrict the upgrade set to the requested packages.
                pending = [pkg for pkg in pending if pkg in pkgs]
                expected = {pkgname: pkgver
                            for pkgname, pkgver in six.iteritems(expected)
                            if pkgname in pkgs}
        except Exception as exc:
            ret['comment'] = six.text_type(exc)
            return ret
    else:
        ret['comment'] = 'refresh must be either True or False'
        return ret

    if not pending:
        ret['comment'] = 'System is already up-to-date'
        ret['result'] = True
        return ret
    elif __opts__['test']:
        ret['comment'] = 'System update will be performed'
        ret['changes'] = expected
        ret['result'] = None
        return ret

    try:
        ret['changes'] = __salt__['pkg.upgrade'](refresh=refresh,
                                                 pkgs=pkgs,
                                                 **kwargs)
    except CommandExecutionError as exc:
        # Build the state return from the exception payload when present.
        if exc.info:
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = ('An error was encountered while updating '
                              'packages: {0}'.format(exc))
        return ret

    # If a package list was provided, ensure those packages were updated
    missing = []
    if isinstance(pkgs, list):
        missing = [pkg for pkg in six.iterkeys(expected)
                   if pkg not in ret['changes']]

    if missing:
        ret['comment'] = 'The following package(s) failed to update: {0}'.format(', '.join(missing))
        ret['result'] = False
    else:
        ret['comment'] = 'Upgrade ran successfully'
        ret['result'] = True

    return ret
def group_installed(name, skip=None, include=None, **kwargs):
    '''
    .. versionadded:: 2015.8.0

    .. versionchanged:: 2016.11.0
        Added support in :mod:`pacman <salt.modules.pacman>`

    Ensure that an entire package group is installed. Supported for the
    :mod:`yum <salt.modules.yumpkg>` and :mod:`pacman <salt.modules.pacman>`
    package managers.

    skip
        "Default" packages of the group which should not be installed.

    include
        "Optional" packages of the group which should be installed in
        addition to the mandatory/default members.

        .. versionchanged:: 2016.3.0
            Must be passed as a list, not a comma-separated string.

    .. note::
        Any additional keyword argument is passed through to
        :py:func:`pkg.install <salt.modules.yumpkg.install>`.
    '''
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}

    if 'pkg.group_diff' not in __salt__:
        ret['comment'] = 'pkg.group_install not available for this platform'
        return ret

    # Coerce every skip/include entry to text, mutating the lists in place.
    if skip is None:
        skip = []
    else:
        if not isinstance(skip, list):
            ret['comment'] = 'skip must be formatted as a list'
            return ret
        for pos, entry in enumerate(skip):
            if not isinstance(entry, six.string_types):
                skip[pos] = six.text_type(entry)

    if include is None:
        include = []
    else:
        if not isinstance(include, list):
            ret['comment'] = 'include must be formatted as a list'
            return ret
        for pos, entry in enumerate(include):
            if not isinstance(entry, six.string_types):
                include[pos] = six.text_type(entry)

    try:
        diff = __salt__['pkg.group_diff'](name)
    except CommandExecutionError as err:
        ret['comment'] = ('An error was encountered while installing/updating '
                          'group \'{0}\': {1}.'.format(name, err))
        return ret

    # Mandatory members may never be skipped.
    mandatory = diff['mandatory']['installed'] + \
        diff['mandatory']['not installed']

    invalid_skip = [x for x in mandatory if x in skip]
    if invalid_skip:
        ret['comment'] = (
            'The following mandatory packages cannot be skipped: {0}'
            .format(', '.join(invalid_skip))
        )
        return ret

    targets = diff['mandatory']['not installed']
    targets.extend([x for x in diff['default']['not installed']
                    if x not in skip])
    targets.extend(include)

    if not targets:
        ret['result'] = True
        ret['comment'] = 'Group \'{0}\' is already installed'.format(name)
        return ret

    partially_installed = diff['mandatory']['installed'] \
        or diff['default']['installed'] \
        or diff['optional']['installed']

    if __opts__['test']:
        ret['result'] = None
        if partially_installed:
            ret['comment'] = (
                'Group \'{0}\' is partially installed and will be updated'
                .format(name)
            )
        else:
            ret['comment'] = 'Group \'{0}\' will be installed'.format(name)
        return ret

    try:
        ret['changes'] = __salt__['pkg.install'](pkgs=targets, **kwargs)
    except CommandExecutionError as exc:
        ret = {'name': name, 'result': False}
        # Build the state return from the exception payload when present.
        if exc.info:
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = ('An error was encountered while '
                              'installing/updating group \'{0}\': {1}'
                              .format(name, exc))
        return ret

    failed = [x for x in targets
              if x not in __salt__['pkg.list_pkgs'](**kwargs)]
    if failed:
        ret['comment'] = (
            'Failed to install the following packages: {0}'
            .format(', '.join(failed))
        )
        return ret

    ret['result'] = True
    ret['comment'] = 'Group \'{0}\' was {1}'.format(
        name,
        'updated' if partially_installed else 'installed'
    )
    return ret
def mod_init(low):
    '''
    Set a flag to tell the install functions to refresh the package database.
    This ensures that the package database is refreshed only once during a
    state run, significantly improving the speed of package management during
    a state run.

    Also runs the provider's optional "ex_mod_init" hook (e.g.
    :py:func:`salt.modules.ebuild.ex_mod_init`), which behaves like a normal
    "mod_init" function.
    '''
    ret = True
    if 'pkg.ex_mod_init' in __salt__:
        ret = __salt__['pkg.ex_mod_init'](low)

    if low['fun'] in ('installed', 'latest'):
        # Write the refresh tag so the first install in this run refreshes.
        salt.utils.pkg.write_rtag(__opts__)
        return ret
    return False
def mod_aggregate(low, chunks, running):
    '''
    The mod_aggregate function: look up all pkg chunks in the available low
    chunks and merge them into a single ``pkgs`` (or ``sources``) reference
    in the present low data.
    '''
    if low.get('fun') not in ('installed', 'latest', 'removed', 'purged'):
        return low

    collected = []
    agg_key = None
    for chunk in chunks:
        if __utils__['state.gen_tag'](chunk) in running:
            # Already ran the pkg state, skip aggregation
            continue
        if chunk.get('state') != 'pkg':
            continue
        if '__agg__' in chunk:
            continue
        # Only aggregate chunks with the same function and repo.
        if chunk.get('fun') != low.get('fun'):
            continue
        if chunk.get('fromrepo') != low.get('fromrepo'):
            continue
        if 'sources' in chunk:
            # Never mix 'sources' entries with 'pkgs' entries.
            if agg_key is None:
                agg_key = 'sources'
            if agg_key == 'sources':
                collected.extend(chunk['sources'])
                chunk['__agg__'] = True
        else:
            if agg_key is None:
                agg_key = 'pkgs'
            if agg_key == 'pkgs':
                # Pull out the pkg names!
                if 'pkgs' in chunk:
                    collected.extend(chunk['pkgs'])
                    chunk['__agg__'] = True
                elif 'name' in chunk:
                    version = chunk.pop('version', None)
                    if version is not None:
                        collected.append({chunk['name']: version})
                    else:
                        collected.append(chunk['name'])
                    chunk['__agg__'] = True

    if agg_key is not None and collected:
        if agg_key in low:
            low[agg_key].extend(collected)
        else:
            low[agg_key] = collected
    return low
def mod_watch(name, **kwargs):
    '''
    Install/reinstall a package based on a watch requisite

    .. note::
        This state exists to support special handling of the ``watch``
        :ref:`requisite <requisites>`. It should not be called directly.

        Parameters for this function should be set by the state being
        triggered.
    '''
    sfun = kwargs.pop('sfun', None)
    dispatch = {
        'purged': purged,
        'latest': latest,
        'removed': removed,
        'installed': installed,
    }
    handler = dispatch.get(sfun)
    if handler is not None:
        return handler(name, **kwargs)
    return {'name': name,
            'changes': {},
            'comment': 'pkg.{0} does not work with the watch requisite'.format(sfun),
            'result': False}
saltstack/salt
salt/states/pkg.py
_find_advisory_targets
python
def _find_advisory_targets(name=None, advisory_ids=None, **kwargs):
    '''
    Inspect the arguments to pkg.patch_installed and discover what advisory
    patches need to be installed. Return a list of advisory patches to
    install, or a complete state-return dict when nothing needs doing.
    '''
    cur_patches = __salt__['pkg.list_installed_patches'](**kwargs)
    if advisory_ids:
        to_download = advisory_ids
    else:
        to_download = [name]
        if cur_patches.get(name, {}):
            # Advisory patch already installed, no need to install it again
            return {'name': name,
                    'changes': {},
                    'result': True,
                    'comment': 'Advisory patch {0} is already '
                               'installed'.format(name)}

    # Target only the advisory patches not yet installed.
    targets = [patch_name for patch_name in to_download
               if not cur_patches.get(patch_name, {})]

    if not targets:
        msg = ('All specified advisory patches are already installed')
        return {'name': name,
                'changes': {},
                'result': True,
                'comment': msg}

    return targets
Inspect the arguments to pkg.patch_installed and discover what advisory patches need to be installed. Return a dict of advisory patches to install.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pkg.py#L375-L412
null
# -*- coding: utf-8 -*- ''' Installation of packages using OS package managers such as yum or apt-get ========================================================================= .. note:: On minions running systemd>=205, as of version 2015.8.12, 2016.3.3, and 2016.11.0, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing the package manager commands spawned by Salt, when Salt updates itself (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.use_scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Salt can manage software packages via the pkg state module, packages can be set up to be installed, latest, removed and purged. Package management declarations are typically rather simple: .. code-block:: yaml vim: pkg.installed A more involved example involves pulling from a custom repository. .. code-block:: yaml base: pkgrepo.managed: - humanname: Logstash PPA - name: ppa:wolfnet/logstash - dist: precise - file: /etc/apt/sources.list.d/logstash.list - keyid: 28B04E4A - keyserver: keyserver.ubuntu.com logstash: pkg.installed: - fromrepo: ppa:wolfnet/logstash Multiple packages can also be installed with the use of the pkgs state module .. code-block:: yaml dotdeb.repo: pkgrepo.managed: - humanname: Dotdeb - name: deb http://packages.dotdeb.org wheezy-php55 all - dist: wheezy-php55 - file: /etc/apt/sources.list.d/dotbeb.list - keyid: 89DF5277 - keyserver: keys.gnupg.net - refresh_db: true php.packages: pkg.installed: - fromrepo: wheezy-php55 - pkgs: - php5-fpm - php5-cli - php5-curl .. 
warning:: Package names are currently case-sensitive. If the minion is using a package manager which is not case-sensitive (such as :mod:`pkgng <salt.modules.pkgng>`), then this state will fail if the proper case is not used. This will be addressed in a future release of Salt. ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import fnmatch import logging import os import re # Import Salt libs import salt.utils.pkg import salt.utils.platform import salt.utils.versions from salt.output import nested from salt.utils.functools import namespaced_function as _namespaced_function from salt.utils.odict import OrderedDict as _OrderedDict from salt.exceptions import ( CommandExecutionError, MinionError, SaltInvocationError ) from salt.modules.pkg_resource import _repack_pkgs # Import 3rd-party libs from salt.ext import six # pylint: disable=invalid-name _repack_pkgs = _namespaced_function(_repack_pkgs, globals()) if salt.utils.platform.is_windows(): # pylint: disable=import-error,no-name-in-module,unused-import from salt.ext.six.moves.urllib.parse import urlparse as _urlparse from salt.exceptions import SaltRenderError import collections import datetime import errno import time from functools import cmp_to_key # pylint: disable=import-error # pylint: enable=unused-import from salt.modules.win_pkg import _get_package_info from salt.modules.win_pkg import get_repo_data from salt.modules.win_pkg import _get_repo_details from salt.modules.win_pkg import _refresh_db_conditional from salt.modules.win_pkg import refresh_db from salt.modules.win_pkg import genrepo from salt.modules.win_pkg import _repo_process_pkg_sls from salt.modules.win_pkg import _get_latest_pkg_version from salt.modules.win_pkg import _reverse_cmp_pkg_versions _get_package_info = _namespaced_function(_get_package_info, globals()) get_repo_data = _namespaced_function(get_repo_data, globals()) _get_repo_details = \ _namespaced_function(_get_repo_details, globals()) 
_refresh_db_conditional = \ _namespaced_function(_refresh_db_conditional, globals()) refresh_db = _namespaced_function(refresh_db, globals()) genrepo = _namespaced_function(genrepo, globals()) _repo_process_pkg_sls = \ _namespaced_function(_repo_process_pkg_sls, globals()) _get_latest_pkg_version = \ _namespaced_function(_get_latest_pkg_version, globals()) _reverse_cmp_pkg_versions = \ _namespaced_function(_reverse_cmp_pkg_versions, globals()) # The following imports are used by the namespaced win_pkg funcs # and need to be included in their globals. # pylint: disable=import-error,unused-import from salt.utils.versions import LooseVersion # pylint: enable=import-error,unused-import # pylint: enable=invalid-name log = logging.getLogger(__name__) def __virtual__(): ''' Only make these states available if a pkg provider has been detected or assigned for this minion ''' return 'pkg.install' in __salt__ def _get_comparison_spec(pkgver): ''' Return a tuple containing the comparison operator and the version. If no comparison operator was passed, the comparison is assumed to be an "equals" comparison, and "==" will be the operator returned. ''' oper, verstr = salt.utils.pkg.split_comparison(pkgver.strip()) if oper in ('=', ''): oper = '==' return oper, verstr def _parse_version_string(version_conditions_string): ''' Returns a list of two-tuples containing (operator, version). ''' result = [] version_conditions_string = version_conditions_string.strip() if not version_conditions_string: return result for version_condition in version_conditions_string.split(','): operator_and_version = _get_comparison_spec(version_condition) result.append(operator_and_version) return result def _fulfills_version_string(installed_versions, version_conditions_string, ignore_epoch=False, allow_updates=False): ''' Returns True if any of the installed versions match the specified version conditions, otherwise returns False. 
def _fulfills_version_string(installed_versions,
                             version_conditions_string,
                             ignore_epoch=False,
                             allow_updates=False):
    '''
    Return True when at least one entry of ``installed_versions`` satisfies
    every condition in ``version_conditions_string``, otherwise False.

    installed_versions
        The installed versions

    version_conditions_string
        The string containing all version conditions, e.g.
        ``>=1.2.3-4, <2.3.4-5, !=1.2.4-1``

    ignore_epoch : False
        Disregard a non-zero epoch prefix (e.g. the ``1:`` in
        ``1:3.14.159-2.el7``) when comparing versions.

    allow_updates : False
        Allow the package to be updated outside Salt's control (e.g. auto
        updates on Windows). Only applies when exactly one strict (``==``)
        condition is specified: that condition is relaxed to ``>=``.
    '''
    conditions = _parse_version_string(version_conditions_string)
    relax_strict = allow_updates and len(conditions) == 1
    for candidate in installed_versions:
        satisfied = True
        for oper, wanted in conditions:
            if relax_strict and oper == '==':
                oper = '>='
            # `and` short-circuits: once one condition fails for this
            # candidate no further comparisons are performed.
            satisfied = satisfied and _fulfills_version_spec(
                [candidate], oper, wanted, ignore_epoch=ignore_epoch)
        if satisfied:
            return True
    return False


def _fulfills_version_spec(versions, oper, desired_version, ignore_epoch=False):
    '''
    Return True if any entry of ``versions`` compares successfully against
    ``desired_version`` using operator ``oper``. For ``==`` comparisons the
    desired version may also be a shell-style wildcard pattern.
    '''
    cmp_func = __salt__.get('pkg.version_cmp')
    # On FreeBSD the version list may arrive wrapped in a "with_origin"
    # dict ({'version': ..., 'origin': ...}); unwrap it first.
    if salt.utils.platform.is_freebsd():
        if isinstance(versions, dict) and 'version' in versions:
            versions = versions['version']
    for installed in versions:
        if oper == '==' and fnmatch.fnmatch(installed, desired_version):
            return True
        if salt.utils.versions.compare(ver1=installed,
                                       oper=oper,
                                       ver2=desired_version,
                                       cmp_func=cmp_func,
                                       ignore_epoch=ignore_epoch):
            return True
    return False
def _find_unpurge_targets(desired, **kwargs):
    '''
    Return the subset of ``desired`` package names which are marked to be
    purged but can't yet be removed because they are dependencies for other
    installed packages. These are the packages which will need to be
    'unpurged' because they are part of pkg.installed states. This really
    just applies to Debian-based Linuxes.
    '''
    # Query the package database once up front. The previous implementation
    # called pkg.list_pkgs inside the comprehension's condition, re-running
    # the (expensive) query once per desired package.
    purge_marked = __salt__['pkg.list_pkgs'](purge_desired=True, **kwargs)
    return [x for x in desired if x in purge_marked]
def _find_download_targets(name=None,
                           version=None,
                           pkgs=None,
                           normalize=True,
                           skip_suggestions=False,
                           ignore_epoch=False,
                           **kwargs):
    '''
    Inspect the arguments to pkg.downloaded and discover what packages need
    to be downloaded. Returns a dict of download targets, or a state-style
    result dict when nothing needs to be done or the input was malformed.
    '''
    cur_pkgs = __salt__['pkg.list_downloaded'](**kwargs)
    if pkgs:
        to_download = _repack_pkgs(pkgs, normalize=normalize)

        if not to_download:
            # Badly-formatted SLS
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'Invalidly formatted pkgs parameter. See '
                               'minion log.'}
    else:
        if normalize:
            _normalize_name = \
                __salt__.get('pkg.normalize_name', lambda pkgname: pkgname)
            to_download = {_normalize_name(name): version}
        else:
            to_download = {name: version}

        cver = cur_pkgs.get(name, {})
        if name in to_download:
            # Package already downloaded, no need to download again
            if cver and version in cver:
                return {'name': name,
                        'changes': {},
                        'result': True,
                        'comment': 'Version {0} of package \'{1}\' is already '
                                   'downloaded'.format(version, name)}

            # if cver is not an empty string, the package is already downloaded
            elif cver and version is None:
                # The package is downloaded
                return {'name': name,
                        'changes': {},
                        'result': True,
                        'comment': 'Package {0} is already '
                                   'downloaded'.format(name)}

    version_spec = False
    if not skip_suggestions:
        try:
            problems = _preflight_check(to_download, **kwargs)
        except CommandExecutionError:
            pass
        else:
            comments = []
            if problems.get('no_suggest'):
                comments.append(
                    'The following package(s) were not found, and no '
                    'possible matches were found in the package db: '
                    '{0}'.format(', '.join(sorted(problems['no_suggest'])))
                )
            if problems.get('suggest'):
                for pkgname, suggestions in \
                        six.iteritems(problems['suggest']):
                    comments.append(
                        'Package \'{0}\' not found (possible matches: '
                        '{1})'.format(pkgname, ', '.join(suggestions))
                    )
            if comments:
                if len(comments) > 1:
                    comments.append('')
                return {'name': name,
                        'changes': {},
                        'result': False,
                        'comment': '. '.join(comments).rstrip()}

    # Find out which packages will be targeted in the call to pkg.download.
    # Check current downloaded versions against specified versions.
    targets = {}
    problems = []
    for pkgname, pkgver in six.iteritems(to_download):
        cver = cur_pkgs.get(pkgname, {})
        # Package not yet downloaded, so add to targets
        if not cver:
            targets[pkgname] = pkgver
            continue
        # No version specified but package is already downloaded
        elif cver and not pkgver:
            continue

        version_spec = True
        try:
            if not _fulfills_version_string(cver.keys(), pkgver,
                                            ignore_epoch=ignore_epoch):
                targets[pkgname] = pkgver
        except CommandExecutionError as exc:
            problems.append(exc.strerror)
            continue

    if problems:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': ' '.join(problems)}

    if not targets:
        # All specified packages are already downloaded
        msg = ('All specified packages{0} are already downloaded'
               .format(' (matching specified versions)'
                       if version_spec else ''))
        return {'name': name,
                'changes': {},
                'result': True,
                'comment': msg}

    return targets
def _find_remove_targets(name=None,
                         version=None,
                         pkgs=None,
                         normalize=True,
                         ignore_epoch=False,
                         **kwargs):
    '''
    Inspect the arguments to pkg.removed and discover what packages need to
    be removed. Returns a list of target package names, or a state-style
    result dict when nothing needs to be done or an error occurred.
    '''
    if __grains__['os'] == 'FreeBSD':
        kwargs['with_origin'] = True
    cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
    if pkgs:
        to_remove = _repack_pkgs(pkgs, normalize=normalize)

        if not to_remove:
            # Badly-formatted SLS
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'Invalidly formatted pkgs parameter. See '
                               'minion log.'}
    else:
        _normalize_name = \
            __salt__.get('pkg.normalize_name', lambda pkgname: pkgname)
        to_remove = {_normalize_name(name): version}

    version_spec = False
    # Find out which packages will be targeted in the call to pkg.remove.
    # Check current versions against specified versions.
    targets = []
    problems = []
    for pkgname, pkgver in six.iteritems(to_remove):
        # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names
        origin = '/' in pkgname

        if __grains__['os'] == 'FreeBSD' and origin:
            cver = [k for k, v in six.iteritems(cur_pkgs)
                    if v['origin'] == pkgname]
        else:
            cver = cur_pkgs.get(pkgname, [])

        # Package not installed, no need to remove
        if not cver:
            continue
        # No version specified and pkg is installed
        elif __salt__['pkg_resource.version_clean'](pkgver) is None:
            targets.append(pkgname)
            continue
        version_spec = True
        try:
            if _fulfills_version_string(cver, pkgver,
                                        ignore_epoch=ignore_epoch):
                targets.append(pkgname)
            else:
                log.debug(
                    'Current version (%s) did not match desired version '
                    'specification (%s), will not remove', cver, pkgver
                )
        except CommandExecutionError as exc:
            problems.append(exc.strerror)
            continue

    if problems:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': ' '.join(problems)}

    if not targets:
        # All specified packages are already absent
        msg = 'All specified packages{0} are already absent'.format(
            ' (matching specified versions)' if version_spec else ''
        )
        return {'name': name,
                'changes': {},
                'result': True,
                'comment': msg}

    return targets
def _find_install_targets(name=None,
                          version=None,
                          pkgs=None,
                          sources=None,
                          skip_suggestions=False,
                          pkg_verify=False,
                          normalize=True,
                          ignore_epoch=False,
                          reinstall=False,
                          refresh=False,
                          **kwargs):
    '''
    Inspect the arguments to pkg.installed and discover what packages need
    to be installed.

    Returns either a state-style result dict (nothing to do, or an error),
    or the 7-tuple::

        (desired, targets, to_unpurge, to_reinstall, altered_files,
         warnings, was_refreshed)
    '''
    was_refreshed = False

    if all((pkgs, sources)):
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'Only one of "pkgs" and "sources" is permitted.'}

    # dict for packages that fail pkg.verify and their altered files
    altered_files = {}
    # Get the ignore_types list if any from the pkg_verify argument
    if isinstance(pkg_verify, list) \
            and any(x.get('ignore_types') is not None
                    for x in pkg_verify
                    if isinstance(x, _OrderedDict)
                    and 'ignore_types' in x):
        ignore_types = next(x.get('ignore_types')
                            for x in pkg_verify
                            if 'ignore_types' in x)
    else:
        ignore_types = []

    # Get the verify_options list if any from the pkg_verify argument
    if isinstance(pkg_verify, list) \
            and any(x.get('verify_options') is not None
                    for x in pkg_verify
                    if isinstance(x, _OrderedDict)
                    and 'verify_options' in x):
        verify_options = next(x.get('verify_options')
                              for x in pkg_verify
                              if 'verify_options' in x)
    else:
        verify_options = []

    if __grains__['os'] == 'FreeBSD':
        kwargs['with_origin'] = True

    if salt.utils.platform.is_windows():
        # Windows requires a refresh to establish a pkg db if refresh=True,
        # so add it to the kwargs.
        kwargs['refresh'] = refresh

    resolve_capabilities = \
        kwargs.get('resolve_capabilities', False) \
        and 'pkg.list_provides' in __salt__
    try:
        cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
        cur_prov = \
            resolve_capabilities and __salt__['pkg.list_provides'](**kwargs) \
            or dict()
    except CommandExecutionError as exc:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': exc.strerror}

    if salt.utils.platform.is_windows() and kwargs.pop('refresh', False):
        # We already refreshed when we called pkg.list_pkgs
        was_refreshed = True
        refresh = False

    if any((pkgs, sources)):
        if pkgs:
            desired = _repack_pkgs(pkgs, normalize=normalize)
        elif sources:
            desired = __salt__['pkg_resource.pack_sources'](
                sources,
                normalize=normalize,
            )

        if not desired:
            # Badly-formatted SLS
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'Invalidly formatted \'{0}\' parameter. See '
                               'minion log.'.format('pkgs' if pkgs
                                                    else 'sources')}
        to_unpurge = _find_unpurge_targets(desired, **kwargs)
    else:
        if salt.utils.platform.is_windows():
            pkginfo = _get_package_info(name, saltenv=kwargs['saltenv'])
            if not pkginfo:
                return {'name': name,
                        'changes': {},
                        'result': False,
                        'comment': 'Package {0} not found in the '
                                   'repository.'.format(name)}
            if version is None:
                version = _get_latest_pkg_version(pkginfo)

        if normalize:
            _normalize_name = \
                __salt__.get('pkg.normalize_name', lambda pkgname: pkgname)
            desired = {_normalize_name(name): version}
        else:
            desired = {name: version}

        to_unpurge = _find_unpurge_targets(desired, **kwargs)

        # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names
        origin = bool(re.search('/', name))

        if __grains__['os'] == 'FreeBSD' and origin:
            cver = [k for k, v in six.iteritems(cur_pkgs)
                    if v['origin'] == name]
        else:
            cver = cur_pkgs.get(name, [])

        if name not in to_unpurge:
            if version and version in cver \
                    and not reinstall \
                    and not pkg_verify:
                # The package is installed and is the correct version
                return {'name': name,
                        'changes': {},
                        'result': True,
                        'comment': 'Version {0} of package \'{1}\' is already '
                                   'installed'.format(version, name)}

            # if cver is not an empty string, the package is already installed
            elif cver and version is None \
                    and not reinstall \
                    and not pkg_verify:
                # The package is installed
                return {'name': name,
                        'changes': {},
                        'result': True,
                        'comment': 'Package {0} is already '
                                   'installed'.format(name)}

    version_spec = False
    if not sources:
        # Check for alternate package names if strict processing is not
        # enforced. Takes extra time. Disable for improved performance
        if not skip_suggestions:
            # Perform platform-specific pre-flight checks.
            #
            # NOTE: a generator expression is used here instead of the old
            # dict([(name, version) for name, version in ...]) form, because
            # under Python 2 a list comprehension leaks its loop variables
            # into the enclosing scope, clobbering this function's `name`
            # and `version` parameters for all code below.
            not_installed = dict(
                (pkg, pkgver) for pkg, pkgver in desired.items()
                if not (pkg in cur_pkgs and
                        (pkgver is None or
                         _fulfills_version_string(cur_pkgs[pkg], pkgver)))
            )
            if not_installed:
                try:
                    problems = _preflight_check(not_installed, **kwargs)
                except CommandExecutionError:
                    pass
                else:
                    comments = []
                    if problems.get('no_suggest'):
                        comments.append(
                            'The following package(s) were not found, and no '
                            'possible matches were found in the package db: '
                            '{0}'.format(
                                ', '.join(sorted(problems['no_suggest']))
                            )
                        )
                    if problems.get('suggest'):
                        for pkgname, suggestions in \
                                six.iteritems(problems['suggest']):
                            comments.append(
                                'Package \'{0}\' not found (possible matches: '
                                '{1})'.format(pkgname, ', '.join(suggestions))
                            )
                    if comments:
                        if len(comments) > 1:
                            comments.append('')
                        return {'name': name,
                                'changes': {},
                                'result': False,
                                'comment': '. '.join(comments).rstrip()}

    # Resolve the latest package version for any packages with "latest" in
    # the package version
    wants_latest = [] \
        if sources \
        else [x for x, y in six.iteritems(desired) if y == 'latest']
    if wants_latest:
        resolved_latest = __salt__['pkg.latest_version'](*wants_latest,
                                                         refresh=refresh,
                                                         **kwargs)
        if len(wants_latest) == 1:
            resolved_latest = {wants_latest[0]: resolved_latest}
        if refresh:
            was_refreshed = True
            refresh = False

        # pkg.latest_version returns an empty string when the package is
        # up-to-date. So check the currently-installed packages. If found,
        # the resolved latest version will be the currently installed one
        # from cur_pkgs. If not found, then the package doesn't exist and
        # the resolved latest version will be None.
        for key in resolved_latest:
            if not resolved_latest[key]:
                if key in cur_pkgs:
                    resolved_latest[key] = cur_pkgs[key][-1]
                else:
                    resolved_latest[key] = None
        # Update the desired versions with the ones we resolved
        desired.update(resolved_latest)

    # Find out which packages will be targeted in the call to pkg.install
    targets = {}
    to_reinstall = {}
    problems = []
    warnings = []
    failed_verify = False
    for package_name, version_string in six.iteritems(desired):
        cver = cur_pkgs.get(package_name, [])
        if resolve_capabilities and not cver and package_name in cur_prov:
            cver = cur_pkgs.get(cur_prov.get(package_name)[0], [])

        # Package not yet installed, so add to targets
        if not cver:
            targets[package_name] = version_string
            continue

        if sources:
            if reinstall:
                to_reinstall[package_name] = version_string
                continue
            elif 'lowpkg.bin_pkg_info' not in __salt__:
                continue
            # Metadata parser is available, cache the file and derive the
            # package's name and version
            err = 'Unable to cache {0}: {1}'
            try:
                cached_path = __salt__['cp.cache_file'](
                    version_string,
                    saltenv=kwargs['saltenv'])
            except CommandExecutionError as exc:
                problems.append(err.format(version_string, exc))
                continue
            if not cached_path:
                problems.append(err.format(version_string, 'file not found'))
                continue
            elif not os.path.exists(cached_path):
                problems.append('{0} does not exist on minion'.format(
                    version_string))
                continue
            source_info = __salt__['lowpkg.bin_pkg_info'](cached_path)
            if source_info is None:
                warnings.append('Failed to parse metadata for '
                                '{0}'.format(version_string))
                continue
            else:
                verstr = source_info['version']
        else:
            verstr = version_string
            if reinstall:
                to_reinstall[package_name] = version_string
                continue
            if not __salt__['pkg_resource.check_extra_requirements'](
                    package_name, version_string):
                targets[package_name] = version_string
                continue
            # No version specified and pkg is installed
            elif __salt__['pkg_resource.version_clean'](version_string) \
                    is None:
                if (not reinstall) and pkg_verify:
                    try:
                        verify_result = __salt__['pkg.verify'](
                            package_name,
                            ignore_types=ignore_types,
                            verify_options=verify_options,
                            **kwargs)
                    except (CommandExecutionError,
                            SaltInvocationError) as exc:
                        failed_verify = exc.strerror
                        continue
                    if verify_result:
                        to_reinstall[package_name] = version_string
                        altered_files[package_name] = verify_result
                continue

        version_fulfilled = False
        allow_updates = bool(not sources and kwargs.get('allow_updates'))
        try:
            version_fulfilled = _fulfills_version_string(
                cver,
                verstr,
                ignore_epoch=ignore_epoch,
                allow_updates=allow_updates)
        except CommandExecutionError as exc:
            problems.append(exc.strerror)
            continue

        # Compare desired version against installed version.
        version_spec = True
        if not version_fulfilled:
            if reinstall:
                to_reinstall[package_name] = version_string
            else:
                version_conditions = _parse_version_string(version_string)
                if pkg_verify and any(
                        oper == '==' for oper, _ in version_conditions):
                    try:
                        verify_result = __salt__['pkg.verify'](
                            package_name,
                            ignore_types=ignore_types,
                            verify_options=verify_options,
                            **kwargs)
                    except (CommandExecutionError,
                            SaltInvocationError) as exc:
                        failed_verify = exc.strerror
                        continue
                    if verify_result:
                        to_reinstall[package_name] = version_string
                        altered_files[package_name] = verify_result
                else:
                    log.debug(
                        'Current version (%s) did not match desired version '
                        'specification (%s), adding to installation targets',
                        cver, version_string
                    )
                    targets[package_name] = version_string

    if failed_verify:
        problems.append(failed_verify)

    if problems:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': ' '.join(problems)}

    if not any((targets, to_unpurge, to_reinstall)):
        # All specified packages are installed
        msg = 'All specified packages are already installed{0}'
        msg = msg.format(
            ' and are at the desired version'
            if version_spec and not sources else ''
        )
        ret = {'name': name,
               'changes': {},
               'result': True,
               'comment': msg}
        if warnings:
            ret.setdefault('warnings', []).extend(warnings)
        return ret

    return (desired, targets, to_unpurge, to_reinstall, altered_files,
            warnings, was_refreshed)
def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None):
    '''
    Check the freshly-installed package list against what was requested in
    the SLS file. Returns a two-tuple of lists:
    (packages whose desired version is satisfied, packages which failed).
    '''
    ok = []
    failed = []
    if not new_caps:
        new_caps = dict()
    for pkgname, pkgver in desired.items():
        # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names.
        # Homebrew for Mac OSX does something similar with tap names
        # prefixing package names, separated with a slash.
        has_origin = '/' in pkgname

        if __grains__['os'] == 'FreeBSD' and has_origin:
            cver = [k for k, v in six.iteritems(new_pkgs)
                    if v['origin'] == pkgname]
        elif __grains__['os'] == 'MacOS' and has_origin:
            cver = new_pkgs.get(pkgname, new_pkgs.get(pkgname.split('/')[-1]))
        elif __grains__['os'] == 'OpenBSD':
            cver = new_pkgs.get(pkgname.split('%')[0])
        elif __grains__['os_family'] == 'Debian':
            cver = new_pkgs.get(pkgname.split('=')[0])
        else:
            cver = new_pkgs.get(pkgname)

        # Fall back to a capability-resolved name, if one exists
        if not cver and pkgname in new_caps:
            cver = new_pkgs.get(new_caps.get(pkgname)[0])

        if not cver:
            failed.append(pkgname)
            continue
        if pkgver == 'latest':
            ok.append(pkgname)
            continue
        if not __salt__['pkg_resource.version_clean'](pkgver):
            ok.append(pkgname)
            continue
        if pkgver.endswith("*") and cver[0].startswith(pkgver[:-1]):
            ok.append(pkgname)
            continue
        if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch):
            ok.append(pkgname)
        else:
            failed.append(pkgname)
    return ok, failed
''' if not desired[name] or desired[name].startswith(('<', '>', '=')): oper = '' else: oper = '=' return '{0}{1}{2}'.format(name, oper, '' if not desired[name] else desired[name]) def _preflight_check(desired, fromrepo, **kwargs): ''' Perform platform-specific checks on desired packages ''' if 'pkg.check_db' not in __salt__: return {} ret = {'suggest': {}, 'no_suggest': []} pkginfo = __salt__['pkg.check_db']( *list(desired.keys()), fromrepo=fromrepo, **kwargs ) for pkgname in pkginfo: if pkginfo[pkgname]['found'] is False: if pkginfo[pkgname]['suggestions']: ret['suggest'][pkgname] = pkginfo[pkgname]['suggestions'] else: ret['no_suggest'].append(pkgname) return ret def _nested_output(obj): ''' Serialize obj and format for output ''' nested.__opts__ = __opts__ ret = nested.output(obj).rstrip() return ret def _resolve_capabilities(pkgs, refresh=False, **kwargs): ''' Resolve capabilities in ``pkgs`` and exchange them with real package names, when the result is distinct. This feature can be turned on while setting the paramter ``resolve_capabilities`` to True. Return the input dictionary with replaced capability names and as second return value a bool which say if a refresh need to be run. In case of ``resolve_capabilities`` is False (disabled) or not supported by the implementation the input is returned unchanged. ''' if not pkgs or 'pkg.resolve_capabilities' not in __salt__: return pkgs, refresh ret = __salt__['pkg.resolve_capabilities'](pkgs, refresh=refresh, **kwargs) return ret, False def installed( name, version=None, refresh=None, fromrepo=None, skip_verify=False, skip_suggestions=False, pkgs=None, sources=None, allow_updates=False, pkg_verify=False, normalize=True, ignore_epoch=False, reinstall=False, update_holds=False, bypass_file=None, bypass_file_contains=None, **kwargs): ''' Ensure that the package is installed, and that it is the correct version (if specified). :param str name: The name of the package to be installed. 
This parameter is ignored if either "pkgs" or "sources" is used. Additionally, please note that this option can only be used to install packages from a software repository. To install a package file manually, use the "sources" option detailed below. :param str version: Install a specific version of a package. This option is ignored if "sources" is used. Currently, this option is supported for the following pkg providers: :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`pkgin <salt.modules.pkgin>`, :mod:`win_pkg <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`. The version number includes the release designation where applicable, to allow Salt to target a specific release of a given version. When in doubt, using the ``pkg.latest_version`` function for an uninstalled package will tell you the version available. .. code-block:: bash # salt myminion pkg.latest_version vim-enhanced myminion: 2:7.4.160-1.el7 .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the ``pkg.latest_version`` output above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. 
Also, while this function is not yet implemented for all pkg frontends, :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` will show all versions available in the various repositories for a given package, irrespective of whether or not it is installed. .. code-block:: bash # salt myminion pkg.list_repo_pkgs bash myminion: ---------- bash: - 4.2.46-21.el7_3 - 4.2.46-20.el7_2 This function was first added for :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` in 2014.1.0, and was expanded to :py:func:`Debian/Ubuntu <salt.modules.aptpkg.list_repo_pkgs>` and :py:func:`Arch Linux <salt.modules.pacman.list_repo_pkgs>`-based distros in the 2017.7.0 release. The version strings returned by either of these functions can be used as version specifiers in pkg states. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.installed: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 If the version given is the string ``latest``, the latest available package version will be installed à la ``pkg.latest``. **WILDCARD VERSIONS** As of the 2017.7.0 release, this state now supports wildcards in package versions for SUSE SLES/Leap/Tumbleweed, Debian/Ubuntu, RHEL/CentOS, Arch Linux, and their derivatives. Using wildcards can be useful for packages where the release name is built into the version in some way, such as for RHEL/CentOS which typically has version numbers like ``1.2.34-5.el7``. An example of the usage for this would be: .. code-block:: yaml mypkg: pkg.installed: - version: '1.2.34*' Keep in mind that using wildcard versions will result in a slower state run since Salt must gather the available versions of the specified packages and figure out which of them match the specified wildcard expression. :param bool refresh: This parameter controls whether or not the package repo database is updated prior to installing the requested package(s). 
If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before installing. If ``False``, the package database will *not* be refreshed before installing. If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - cache_valid_time: 300 - allow_updates: True - hold: False In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param str fromrepo: Specify a repository from which to install .. note:: Distros which use APT (Debian, Ubuntu, etc.) do not have a concept of repositories, in the same way as YUM-based distros do. When a source is added, it is assigned to a given release. Consider the following source configuration: .. code-block:: text deb http://ppa.launchpad.net/saltstack/salt/ubuntu precise main The packages provided by this source would be made available via the ``precise`` release, therefore ``fromrepo`` would need to be set to ``precise`` for Salt to install the package from this source. 
Having multiple sources in the same release may result in the default install candidate being newer than what is desired. If this is the case, the desired version must be specified using the ``version`` parameter. If the ``pkgs`` parameter is being used to install multiple packages in the same state, then instead of using ``version``, use the method of version specification described in the **Multiple Package Installation Options** section below. Running the shell command ``apt-cache policy pkgname`` on a minion can help elucidate the APT configuration and aid in properly configuring states: .. code-block:: bash root@saltmaster:~# salt ubuntu01 cmd.run 'apt-cache policy ffmpeg' ubuntu01: ffmpeg: Installed: (none) Candidate: 7:0.10.11-1~precise1 Version table: 7:0.10.11-1~precise1 0 500 http://ppa.launchpad.net/jon-severinsson/ffmpeg/ubuntu/ precise/main amd64 Packages 4:0.8.10-0ubuntu0.12.04.1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise-updates/main amd64 Packages 500 http://security.ubuntu.com/ubuntu/ precise-security/main amd64 Packages 4:0.8.1-0ubuntu1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise/main amd64 Packages The release is located directly after the source's URL. The actual release name is the part before the slash, so to install version **4:0.8.10-0ubuntu0.12.04.1** either ``precise-updates`` or ``precise-security`` could be used for the ``fromrepo`` value. :param bool skip_verify: Skip the GPG verification check for the package to be installed :param bool skip_suggestions: Force strict package naming. Disables lookup of package alternatives. .. versionadded:: 2014.1.1 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 :param bool allow_updates: Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). 
This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - allow_updates: True - hold: False :param bool pkg_verify: .. versionadded:: 2014.7.0 For requested packages that are already installed and would not be targeted for upgrade or downgrade, use pkg.verify to determine if any of the files installed by the package have been altered. If files have been altered, the reinstall option of pkg.install is used to force a reinstall. Types to ignore can be passed to pkg.verify. Additionally, ``verify_options`` can be used to modify further the behavior of pkg.verify. See examples below. Currently, this option is supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>`. Examples: .. code-block:: yaml httpd: pkg.installed: - version: 2.2.15-30.el6.centos - pkg_verify: True .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc - verify_options: - nodeps - nofiledigest :param list ignore_types: List of types to ignore when verifying the package .. versionadded:: 2014.7.0 :param list verify_options: List of additional options to pass when verifying the package. These options will be added to the ``rpm -V`` command, prepended with ``--`` (for example, when ``nodeps`` is passed in this option, ``rpm -V`` will be run with ``--nodeps``). .. versionadded:: 2016.11.0 :param bool normalize: Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml gpfs.gplbin-2.6.32-279.31.1.el6.x86_64: pkg.installed: - normalize: False :param bool ignore_epoch: When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.installed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the package would be installed, but the state would report as failed because the actual installed version would be ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 | **MULTIPLE PACKAGE INSTALLATION OPTIONS: (not supported in pkgng)** :param list pkgs: A list of packages to install from a software repository. All packages listed under ``pkgs`` will be installed via a single command. .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar - baz - hold: True ``NOTE:`` For :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`winrepo <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`, version numbers can be specified in the ``pkgs`` argument. For example: .. 
code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz Additionally, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`zypper <salt.modules.zypper>`, :mod:`yum/dnf <salt.modules.yumpkg>`, and :mod:`apt <salt.modules.aptpkg>` support the ``<``, ``<=``, ``>=``, and ``>`` operators for more control over what versions will be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: '>=1.2.3-4' - baz ``NOTE:`` When using comparison operators, the expression must be enclosed in quotes to avoid a YAML render error. With :mod:`ebuild <salt.modules.ebuild>` is also possible to specify a use flag list and/or if the given packages should be in package.accept_keywords file and/or the overlay from which you want the package to be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo: '~' - bar: '~>=1.2:slot::overlay[use,-otheruse]' - baz :param list sources: A list of packages to install, along with the source URI or local path from which to install each package. In the example below, ``foo``, ``bar``, ``baz``, etc. refer to the name of the package, as it would appear in the output of the ``pkg.version`` or ``pkg.list_pkgs`` salt CLI commands. .. code-block:: yaml mypkgs: pkg.installed: - sources: - foo: salt://rpms/foo.rpm - bar: http://somesite.org/bar.rpm - baz: ftp://someothersite.org/baz.rpm - qux: /minion/path/to/qux.rpm **PLATFORM-SPECIFIC ARGUMENTS** These are specific to each OS. If it does not apply to the execution module for your OS, it is ignored. :param bool hold: Force the package to be held at the current installed version. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2014.7.0 :param bool update_holds: If ``True``, and this function would update the package version, any packages which are being held will be temporarily unheld so that they can be updated. 
Otherwise, if this function attempts to update a held package, the held package(s) will be skipped and the state will fail. By default, this parameter is set to ``False``. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2016.11.0 :param list names: A list of packages to install from a software repository. Each package will be installed individually by the package manager. .. warning:: Unlike ``pkgs``, the ``names`` parameter cannot specify a version. In addition, it makes a separate call to the package management frontend to install each package, whereas ``pkgs`` makes just a single call. It is therefore recommended to use ``pkgs`` instead of ``names`` to install multiple packages, both for the additional features and the performance improvement that it brings. :param bool install_recommends: Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - install_recommends: False :param bool only_upgrade: Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. :param bool report_reboot_exit_codes: If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. 
code-block:: yaml

            ms vcpp installed:
              pkg.installed:
                - name: ms-vcpp
                - version: 10.0.40219
                - report_reboot_exit_codes: False

    :param str bypass_file:
        If you wish to bypass the full package validation process, you can
        specify a file related to the installed package as a way to validate
        that the package has already been installed. A good example would be a
        config file that is deployed with the package. Another bypass_file
        could be ``/run/salt-minion.pid``.

        .. code-block:: yaml

            install_ntp:
              pkg.installed:
                - name: ntp
                - bypass_file: /etc/ntp.conf

        The use case for this feature is when running salt at significant
        scale. Each state that has a requisite for a ``pkg.installed`` will
        have salt querying the package manager of the system. Compared to
        simple diff checks, querying the package manager is a lengthy process.
        This feature is an attempt to reduce the run time of states. If only a
        config change is being made but you wish to keep all of the self
        resolving requisites this bypasses the lengthy cost of the package
        manager. The assumption is that if this file is present, the package
        should already be installed.

    :param str bypass_file_contains:
        This option can only be used in conjunction with the ``bypass_file``
        option. It is to provide a second layer of validation before bypassing
        the ``pkg.installed`` process.

        .. code-block:: yaml

            install_ntp:
              pkg.installed:
                - name: ntp
                - bypass_file: /etc/ntp.conf
                - bypass_file_contains: version-20181218

        This will have salt check to see if the file contains the specified
        string. If the value is found, the ``pkg.installed`` process will be
        bypassed under the assumption that two pieces of validation have
        passed and the package is already installed.

        .. warning::
            Do not try and use ``{{ salt['pkg.version']('ntp') }}`` in a jinja
            template as part of your bypass_file_contains match. This will
            trigger a ``pkg.version`` lookup with the package manager and
            negate any time saved by trying to use the bypass feature.
:return: A dictionary containing the state of the software installation :rtype dict: .. note:: The ``pkg.installed`` state supports the usage of ``reload_modules``. This functionality allows you to force Salt to reload all modules. In many cases, Salt is clever enough to transparently reload the modules. For example, if you install a package, Salt reloads modules because some other module or state might require the package which was installed. However, there are some edge cases where this may not be the case, which is what ``reload_modules`` is meant to resolve. You should only use ``reload_modules`` if your ``pkg.installed`` does some sort of installation where if you do not reload the modules future items in your state which rely on the software being installed will fail. Please see the :ref:`Reloading Modules <reloading-modules>` documentation for more information. ''' if not pkgs and isinstance(pkgs, list): return {'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided'} # If just a name (and optionally a version) is passed, just pack them into # the pkgs argument. 
if name and not any((pkgs, sources)): if version: pkgs = [{name: version}] version = None else: pkgs = [name] kwargs['saltenv'] = __env__ refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if bypass_file is not None and bypass_file_contains is not None: if os.path.isfile(bypass_file): with salt.utils.fopen(bypass_file) as bypass_file_open: if bypass_file_contains in bypass_file_open.read(): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as {} was present in {}'.format(bypass_file_contains, bypass_file)} if bypass_file is not None and bypass_file_contains is None: if os.path.isfile(bypass_file): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as bypass_file {} was present'.format(bypass_file)} # check if capabilities should be checked and modify the requested packages # accordingly. if pkgs: pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) if not isinstance(pkg_verify, list): pkg_verify = pkg_verify is True if (pkg_verify or isinstance(pkg_verify, list)) \ and 'pkg.verify' not in __salt__: return {'name': name, 'changes': {}, 'result': False, 'comment': 'pkg.verify not implemented'} if not isinstance(version, six.string_types) and version is not None: version = six.text_type(version) kwargs['allow_updates'] = allow_updates result = _find_install_targets(name, version, pkgs, sources, fromrepo=fromrepo, skip_suggestions=skip_suggestions, pkg_verify=pkg_verify, normalize=normalize, ignore_epoch=ignore_epoch, reinstall=reinstall, refresh=refresh, **kwargs) try: (desired, targets, to_unpurge, to_reinstall, altered_files, warnings, was_refreshed) = result if was_refreshed: refresh = False except ValueError: # _find_install_targets() found no targets or encountered an error # check that the hold function is available if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( 
name=name, pkgs=pkgs, sources=sources ) except (CommandExecutionError, SaltInvocationError) as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': six.text_type(exc)} if 'result' in hold_ret and not hold_ret['result']: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] for i in modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] result['changes'][i['name']] = i['changes'] for i in not_modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] for i in failed_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] return result if to_unpurge and 'lowpkg.unpurge' not in __salt__: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'lowpkg.unpurge not implemented'} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret # Remove any targets not returned by _find_install_targets if pkgs: pkgs = [dict([(x, y)]) for x, y in six.iteritems(targets)] pkgs.extend([dict([(x, y)]) for x, y in six.iteritems(to_reinstall)]) elif sources: oldsources = sources sources = [x for x in oldsources if next(iter(list(x.keys()))) in targets] sources.extend([x for x in oldsources if next(iter(list(x.keys()))) in to_reinstall]) comment = [] if __opts__['test']: if targets: if sources: summary = ', '.join(targets) else: summary = ', '.join([_get_desired_pkg(x, targets) for x in targets]) comment.append('The following packages would be ' 'installed/updated: {0}'.format(summary)) if to_unpurge: comment.append( 'The following packages would have their selection 
status ' 'changed from \'purge\' to \'install\': {0}' .format(', '.join(to_unpurge)) ) if to_reinstall: # Add a comment for each package in to_reinstall with its # pkg.verify output if reinstall: reinstall_targets = [] for reinstall_pkg in to_reinstall: if sources: reinstall_targets.append(reinstall_pkg) else: reinstall_targets.append( _get_desired_pkg(reinstall_pkg, to_reinstall) ) msg = 'The following packages would be reinstalled: ' msg += ', '.join(reinstall_targets) comment.append(msg) else: for reinstall_pkg in to_reinstall: if sources: pkgstr = reinstall_pkg else: pkgstr = _get_desired_pkg(reinstall_pkg, to_reinstall) comment.append( 'Package \'{0}\' would be reinstalled because the ' 'following files have been altered:'.format(pkgstr) ) comment.append( _nested_output(altered_files[reinstall_pkg]) ) ret = {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret changes = {'installed': {}} modified_hold = None not_modified_hold = None failed_hold = None if targets or to_reinstall: try: pkg_ret = __salt__['pkg.install'](name=None, refresh=refresh, version=version, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=pkgs, sources=sources, reinstall=bool(to_reinstall), normalize=normalize, update_holds=update_holds, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while installing ' 'package(s): {0}'.format(exc)) if warnings: ret.setdefault('warnings', []).extend(warnings) return ret if refresh: refresh = False if isinstance(pkg_ret, dict): changes['installed'].update(pkg_ret) elif isinstance(pkg_ret, six.string_types): comment.append(pkg_ret) # Code below will be looking for a dictionary. If this is a string # it means that there was an exception raised and that no packages # changed, so now that we have added this error to the comments we # set this to an empty dictionary so that the code below which # checks reinstall targets works. pkg_ret = {} if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( name=name, pkgs=desired ) except (CommandExecutionError, SaltInvocationError) as exc: comment.append(six.text_type(exc)) ret = {'name': name, 'changes': changes, 'result': False, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: if 'result' in hold_ret and not hold_ret['result']: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] if to_unpurge: changes['purge_desired'] = __salt__['lowpkg.unpurge'](*to_unpurge) # Analyze pkg.install results for packages in targets if sources: modified = [x for x in changes['installed'] if x in targets] not_modified = [x for x in desired if x not in targets 
and x not in to_reinstall] failed = [x for x in targets if x not in modified] else: if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True new_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__: new_caps = __salt__['pkg.list_provides'](**kwargs) else: new_caps = {} ok, failed = _verify_install(desired, new_pkgs, ignore_epoch=ignore_epoch, new_caps=new_caps) modified = [x for x in ok if x in targets] not_modified = [x for x in ok if x not in targets and x not in to_reinstall] failed = [x for x in failed if x in targets] # If there was nothing unpurged, just set the changes dict to the contents # of changes['installed']. if not changes.get('purge_desired'): changes = changes['installed'] if modified: if sources: summary = ', '.join(modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in modified]) if len(summary) < 20: comment.append('The following packages were installed/updated: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} installed/updated.'.format( len(modified), 's' if len(modified) > 1 else '', 'were' if len(modified) > 1 else 'was' ) ) if modified_hold: for i in modified_hold: change_name = i['name'] if change_name in changes: comment.append(i['comment']) if changes[change_name]['new']: changes[change_name]['new'] += '\n' changes[change_name]['new'] += '{0}'.format(i['changes']['new']) if changes[change_name]['old']: changes[change_name]['old'] += '\n' changes[change_name]['old'] += '{0}'.format(i['changes']['old']) else: comment.append(i['comment']) changes[change_name] = {} changes[change_name]['new'] = '{0}'.format(i['changes']['new']) # Any requested packages that were not targeted for install or reinstall if not_modified: if sources: summary = ', '.join(not_modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in not_modified]) if len(not_modified) <= 20: comment.append('The following 
packages were already installed: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} already installed'.format( len(not_modified), 's' if len(not_modified) > 1 else '', 'were' if len(not_modified) > 1 else 'was' ) ) if not_modified_hold: for i in not_modified_hold: comment.append(i['comment']) result = True if failed: if sources: summary = ', '.join(failed) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in failed]) comment.insert(0, 'The following packages failed to ' 'install/update: {0}'.format(summary)) result = False if failed_hold: for i in failed_hold: comment.append(i['comment']) result = False # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] # Rerun pkg.verify for packages in to_reinstall to determine failed modified = [] failed = [] for reinstall_pkg in to_reinstall: if reinstall: if reinstall_pkg in pkg_ret: modified.append(reinstall_pkg) else: failed.append(reinstall_pkg) elif pkg_verify: # No need to wrap this in a try/except because we would already # have caught invalid arguments earlier. 
            # Re-run pkg.verify for this reinstalled package to confirm that
            # the previously altered files were actually remediated.
            verify_result = __salt__['pkg.verify'](reinstall_pkg,
                                                   ignore_types=ignore_types,
                                                   verify_options=verify_options,
                                                   **kwargs)
            if verify_result:
                failed.append(reinstall_pkg)
                altered_files[reinstall_pkg] = verify_result
            else:
                modified.append(reinstall_pkg)

    if modified:
        # Add a comment for each package in modified with its pkg.verify output
        for modified_pkg in modified:
            if sources:
                pkgstr = modified_pkg
            else:
                pkgstr = _get_desired_pkg(modified_pkg, desired)
            msg = 'Package {0} was reinstalled.'.format(pkgstr)
            if modified_pkg in altered_files:
                msg += ' The following files were remediated:'
                comment.append(msg)
                comment.append(_nested_output(altered_files[modified_pkg]))
            else:
                comment.append(msg)

    if failed:
        # Add a comment for each package in failed with its pkg.verify output
        for failed_pkg in failed:
            if sources:
                pkgstr = failed_pkg
            else:
                pkgstr = _get_desired_pkg(failed_pkg, desired)
            msg = ('Reinstall was not successful for package {0}.'
                   .format(pkgstr))
            if failed_pkg in altered_files:
                msg += ' The following files could not be remediated:'
                comment.append(msg)
                comment.append(_nested_output(altered_files[failed_pkg]))
            else:
                comment.append(msg)
        result = False

    # Assemble the final state return dictionary
    ret = {'name': name,
           'changes': changes,
           'result': result,
           'comment': '\n'.join(comment)}
    if warnings:
        ret.setdefault('warnings', []).extend(warnings)
    return ret


def downloaded(name,
               version=None,
               pkgs=None,
               fromrepo=None,
               ignore_epoch=None,
               **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that the package is downloaded, and that it is the correct version
    (if specified).

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    :param str name:
        The name of the package to be downloaded. This parameter is ignored
        if "pkgs" is used. Additionally, please note that this option can
        only be used to download packages from a software repository.

    :param str version:
        Download a specific version of a package.

        .. important::
            As of version 2015.8.7, for distros which use yum/dnf, packages
            which have a version with a nonzero epoch (that is, versions
            which start with a number followed by a colon) must have the
            epoch included when specifying the version number. For example:

            .. code-block:: yaml

                vim-enhanced:
                  pkg.downloaded:
                    - version: 2:7.4.160-1.el7

            An **ignore_epoch** argument has been added which causes the
            epoch to be disregarded when the state checks to see if the
            desired version was installed.

            You can install a specific version when using the ``pkgs``
            argument by including the version after the package:

            .. code-block:: yaml

                common_packages:
                  pkg.downloaded:
                    - pkgs:
                      - unzip
                      - dos2unix
                      - salt-minion: 2015.8.5-1.el6

    :param bool resolve_capabilities:
        Turn on resolving capabilities. This allows one to name "provides" or
        alias names for packages.

        .. versionadded:: 2018.3.0

    CLI Example:

    .. code-block:: yaml

        zsh:
          pkg.downloaded:
            - version: 5.0.5-4.63
            - fromrepo: "myrepository"
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # pkg.list_downloaded is needed below to confirm the download succeeded
    if 'pkg.list_downloaded' not in __salt__:
        ret['result'] = False
        ret['comment'] = 'The pkg.downloaded state is not available on ' \
                         'this platform'
        return ret

    if not pkgs and isinstance(pkgs, list):
        ret['result'] = True
        ret['comment'] = 'No packages to download provided'
        return ret

    # If just a name (and optionally a version) is passed, just pack them into
    # the pkgs argument.
    if name and not pkgs:
        if version:
            pkgs = [{name: version}]
            version = None
        else:
            pkgs = [name]

    # It doesn't make sense here to receive 'downloadonly' in kwargs, as
    # we're explicitly passing 'downloadonly=True' to the execution module.
    if 'downloadonly' in kwargs:
        del kwargs['downloadonly']

    pkgs, _refresh = _resolve_capabilities(pkgs, **kwargs)

    # Only downloading not yet downloaded packages
    targets = _find_download_targets(name,
                                     version,
                                     pkgs,
                                     fromrepo=fromrepo,
                                     ignore_epoch=ignore_epoch,
                                     **kwargs)
    # A dict with a 'result' key is a complete state return (nothing to do or
    # an early exit); any other non-dict value indicates an error.
    if isinstance(targets, dict) and 'result' in targets:
        return targets
    elif not isinstance(targets, dict):
        ret['result'] = False
        ret['comment'] = 'An error was encountered while checking targets: ' \
                         '{0}'.format(targets)
        return ret

    if __opts__['test']:
        summary = ', '.join(targets)
        ret['comment'] = 'The following packages would be ' \
                         'downloaded: {0}'.format(summary)
        return ret

    try:
        pkg_ret = __salt__['pkg.install'](name=name,
                                          pkgs=pkgs,
                                          version=version,
                                          downloadonly=True,
                                          fromrepo=fromrepo,
                                          ignore_epoch=ignore_epoch,
                                          **kwargs)
        ret['result'] = True
        ret['changes'].update(pkg_ret)
    except CommandExecutionError as exc:
        ret = {'name': name, 'result': False}
        if exc.info:
            # Get information for state return from the exception.
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = 'An error was encountered while downloading ' \
                             'package(s): {0}'.format(exc)
        return ret

    # Confirm that the targets now appear among the downloaded packages
    new_pkgs = __salt__['pkg.list_downloaded'](**kwargs)
    ok, failed = _verify_install(targets, new_pkgs, ignore_epoch=ignore_epoch)

    if failed:
        summary = ', '.join([_get_desired_pkg(x, targets)
                             for x in failed])
        ret['result'] = False
        ret['comment'] = 'The following packages failed to ' \
                         'download: {0}'.format(summary)

    if not ret['changes'] and not ret['comment']:
        ret['result'] = True
        ret['comment'] = 'Packages are already downloaded: ' \
                         '{0}'.format(', '.join(targets))

    return ret


def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that packages related to certain advisory ids are installed.

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    CLI Example:

    .. code-block:: yaml

        issue-foo-fixed:
          pkg.patch_installed:
            - advisory_ids:
              - SUSE-SLE-SERVER-12-SP2-2017-185
              - SUSE-SLE-SERVER-12-SP2-2017-150
              - SUSE-SLE-SERVER-12-SP2-2017-120
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    if 'pkg.list_patches' not in __salt__:
        ret['result'] = False
        ret['comment'] = 'The pkg.patch_installed state is not available on ' \
                         'this platform'
        return ret

    if not advisory_ids and isinstance(advisory_ids, list):
        ret['result'] = True
        ret['comment'] = 'No advisory ids provided'
        return ret

    # Determine which advisory ids still need action. A dict with a 'result'
    # key is a complete state return; otherwise a list of targets is expected.
    targets = _find_advisory_targets(name, advisory_ids, **kwargs)
    if isinstance(targets, dict) and 'result' in targets:
        return targets
    elif not isinstance(targets, list):
        ret['result'] = False
        ret['comment'] = 'An error was encountered while checking targets: ' \
                         '{0}'.format(targets)
        return ret

    if __opts__['test']:
        summary = ', '.join(targets)
        ret['comment'] = 'The following advisory patches would be ' \
                         'downloaded: {0}'.format(summary)
        return ret

    try:
        pkg_ret = __salt__['pkg.install'](name=name,
                                          advisory_ids=advisory_ids,
                                          downloadonly=downloadonly,
                                          **kwargs)
        ret['result'] = True
        ret['changes'].update(pkg_ret)
    except CommandExecutionError as exc:
        ret = {'name': name, 'result': False}
        if exc.info:
            # Get information for state return from the exception.
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = ('An error was encountered while downloading '
                              'package(s): {0}'.format(exc))
        return ret

    if not ret['changes'] and not ret['comment']:
        status = 'downloaded' if downloadonly else 'installed'
        ret['result'] = True
        ret['comment'] = ('Advisory patch is not needed or related packages '
                          'are already {0}'.format(status))

    return ret


def patch_downloaded(name, advisory_ids=None, **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that packages related to certain advisory ids are downloaded.

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    CLI Example:

    .. code-block:: yaml

        preparing-to-fix-issues:
          pkg.patch_downloaded:
            - advisory_ids:
              - SUSE-SLE-SERVER-12-SP2-2017-185
              - SUSE-SLE-SERVER-12-SP2-2017-150
              - SUSE-SLE-SERVER-12-SP2-2017-120
    '''
    if 'pkg.list_patches' not in __salt__:
        return {'name': name,
                'result': False,
                'changes': {},
                'comment': 'The pkg.patch_downloaded state is not available on '
                           'this platform'}

    # It doesn't make sense here to receive 'downloadonly' in kwargs, as
    # we're explicitly passing 'downloadonly=True' to the execution module.
    if 'downloadonly' in kwargs:
        del kwargs['downloadonly']
    # Delegate to patch_installed, forcing download-only behavior
    return patch_installed(name=name, advisory_ids=advisory_ids, downloadonly=True, **kwargs)


def latest(
        name,
        refresh=None,
        fromrepo=None,
        skip_verify=False,
        pkgs=None,
        watch_flags=True,
        **kwargs):
    '''
    Ensure that the named package is installed and the latest available
    package. If the package can be updated, this state function will update
    the package. Generally it is better for the
    :mod:`installed <salt.states.pkg.installed>` function to be used, as
    :mod:`latest <salt.states.pkg.latest>` will update the package whenever
    a new package is available.

    name
        The name of the package to maintain at the latest available version.
        This parameter is ignored if "pkgs" is used.
fromrepo Specify a repository from which to install skip_verify Skip the GPG verification check for the package to be installed refresh This parameter controls whether or not the package repo database is updated prior to checking for the latest available version of the requested packages. If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before checking for the latest available version of the requested packages. If ``False``, the package database will *not* be refreshed before checking. If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.latest: - refresh: True - cache_valid_time: 300 In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 Multiple Package Installation Options: (Not yet supported for: FreeBSD, OpenBSD, MacOS, and Solaris pkgutil) pkgs A list of packages to maintain at the latest available version. .. 
code-block:: yaml mypkgs: pkg.latest: - pkgs: - foo - bar - baz install_recommends Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - install_recommends: False only_upgrade Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. report_reboot_exit_codes If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. code-block:: yaml ms vcpp installed: pkg.latest: - name: ms-vcpp - report_reboot_exit_codes: False ''' refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if kwargs.get('sources'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'The "sources" parameter is not supported.'} elif pkgs: desired_pkgs = list(_repack_pkgs(pkgs).keys()) if not desired_pkgs: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted "pkgs" parameter. See ' 'minion log.'} else: if not pkgs and isinstance(pkgs, list): return { 'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided' } else: desired_pkgs = [name] kwargs['saltenv'] = __env__ # check if capabilities should be checked and modify the requested packages # accordingly. 
desired_pkgs, refresh = _resolve_capabilities(desired_pkgs, refresh=refresh, **kwargs) try: avail = __salt__['pkg.latest_version'](*desired_pkgs, fromrepo=fromrepo, refresh=refresh, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking the ' 'newest available version of package(s): {0}' .format(exc)} try: cur = __salt__['pkg.version'](*desired_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': exc.strerror} # Repack the cur/avail data if only a single package is being checked if isinstance(cur, six.string_types): cur = {desired_pkgs[0]: cur} if isinstance(avail, six.string_types): avail = {desired_pkgs[0]: avail} targets = {} problems = [] for pkg in desired_pkgs: if not avail.get(pkg): # Package either a) is up-to-date, or b) does not exist if not cur.get(pkg): # Package does not exist msg = 'No information found for \'{0}\'.'.format(pkg) log.error(msg) problems.append(msg) elif watch_flags \ and __grains__.get('os_family') == 'Gentoo' \ and __salt__['portage_config.is_changed_uses'](pkg): # Package is up-to-date, but Gentoo USE flags are changing so # we need to add it to the targets targets[pkg] = cur[pkg] else: # Package either a) is not installed, or b) is installed and has an # upgrade available targets[pkg] = avail[pkg] if problems: return { 'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems) } if targets: # Find up-to-date packages if not pkgs: # There couldn't have been any up-to-date packages if this state # only targeted a single package and is being allowed to proceed to # the install step. 
up_to_date = [] else: up_to_date = [x for x in pkgs if x not in targets] if __opts__['test']: comments = [] comments.append( 'The following packages would be installed/upgraded: ' + ', '.join(sorted(targets)) ) if up_to_date: up_to_date_count = len(up_to_date) if up_to_date_count <= 10: comments.append( 'The following packages are already up-to-date: ' + ', '.join( ['{0} ({1})'.format(x, cur[x]) for x in sorted(up_to_date)] ) ) else: comments.append( '{0} packages are already up-to-date' .format(up_to_date_count) ) return {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comments)} if salt.utils.platform.is_windows(): # pkg.install execution module on windows ensures the software # package is installed when no version is specified, it does not # upgrade the software to the latest. This is per the design. # Build updated list of pkgs *with verion number*, exclude # non-targeted ones targeted_pkgs = [{x: targets[x]} for x in targets] else: # Build updated list of pkgs to exclude non-targeted ones targeted_pkgs = list(targets) # No need to refresh, if a refresh was necessary it would have been # performed above when pkg.latest_version was run. 
try: changes = __salt__['pkg.install'](name=None, refresh=False, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=targeted_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while installing ' 'package(s): {0}'.format(exc)} if changes: # Find failed and successful updates failed = [x for x in targets if not changes.get(x) or changes[x].get('new') != targets[x] and targets[x] != 'latest'] successful = [x for x in targets if x not in failed] comments = [] if failed: msg = 'The following packages failed to update: ' \ '{0}'.format(', '.join(sorted(failed))) comments.append(msg) if successful: msg = 'The following packages were successfully ' \ 'installed/upgraded: ' \ '{0}'.format(', '.join(sorted(successful))) comments.append(msg) if up_to_date: if len(up_to_date) <= 10: msg = 'The following packages were already up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: msg = '{0} packages were already up-to-date '.format( len(up_to_date)) comments.append(msg) return {'name': name, 'changes': changes, 'result': False if failed else True, 'comment': ' '.join(comments)} else: if len(targets) > 10: comment = ('{0} targeted packages failed to update. ' 'See debug log for details.'.format(len(targets))) elif len(targets) > 1: comment = ('The following targeted packages failed to update. ' 'See debug log for details: ({0}).' 
.format(', '.join(sorted(targets)))) else: comment = 'Package {0} failed to ' \ 'update.'.format(next(iter(list(targets.keys())))) if up_to_date: if len(up_to_date) <= 10: comment += ' The following packages were already ' \ 'up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: comment += '{0} packages were already ' \ 'up-to-date'.format(len(up_to_date)) return {'name': name, 'changes': changes, 'result': False, 'comment': comment} else: if len(desired_pkgs) > 10: comment = 'All {0} packages are up-to-date.'.format( len(desired_pkgs)) elif len(desired_pkgs) > 1: comment = 'All packages are up-to-date ' \ '({0}).'.format(', '.join(sorted(desired_pkgs))) else: comment = 'Package {0} is already ' \ 'up-to-date'.format(desired_pkgs[0]) return {'name': name, 'changes': {}, 'result': True, 'comment': comment} def _uninstall( action='remove', name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Common function for package removal ''' if action not in ('remove', 'purge'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalid action \'{0}\'. 
' 'This is probably a bug.'.format(action)} try: pkg_params = __salt__['pkg_resource.parse_targets']( name, pkgs, normalize=normalize)[0] except MinionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while parsing targets: ' '{0}'.format(exc)} targets = _find_remove_targets(name, version, pkgs, normalize, ignore_epoch=ignore_epoch, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, list): return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking targets: ' '{0}'.format(targets)} if action == 'purge': old_removed = __salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) targets.extend([x for x in pkg_params if x in old_removed]) targets.sort() if not targets: return {'name': name, 'changes': {}, 'result': True, 'comment': 'None of the targeted packages are installed' '{0}'.format(' or partially installed' if action == 'purge' else '')} if __opts__['test']: return {'name': name, 'changes': {}, 'result': None, 'comment': 'The following packages will be {0}d: ' '{1}.'.format(action, ', '.join(targets))} changes = __salt__['pkg.{0}'.format(action)](name, pkgs=pkgs, version=version, **kwargs) new = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) failed = [] for x in pkg_params: if __grains__['os_family'] in ['Suse', 'RedHat']: # Check if the package version set to be removed is actually removed: if x in new and not pkg_params[x]: failed.append(x) elif x in new and pkg_params[x] in new[x]: failed.append(x + "-" + pkg_params[x]) elif x in new: failed.append(x) if action == 'purge': new_removed = __salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) failed.extend([x for x in pkg_params if x in new_removed]) failed.sort() if failed: return {'name': name, 'changes': changes, 'result': False, 'comment': 'The following packages failed to {0}: ' '{1}.'.format(action, ', 
'.join(failed))} comments = [] not_installed = sorted([x for x in pkg_params if x not in targets]) if not_installed: comments.append('The following packages were not installed: ' '{0}'.format(', '.join(not_installed))) comments.append('The following packages were {0}d: ' '{1}.'.format(action, ', '.join(targets))) else: comments.append('All targeted packages were {0}d.'.format(action)) return {'name': name, 'changes': changes, 'result': True, 'comment': ' '.join(comments)} def removed(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.remove`` if necessary to remove the package. name The name of the package to be removed. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.removed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to remove. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='remove', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while removing ' 'package(s): {0}'.format(exc)) return ret def purged(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.purge`` if necessary to purge the package. 
All configuration files are also removed. name The name of the package to be purged. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. 
code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.purged: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to purge. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='purge', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while purging ' 'package(s): {0}'.format(exc)) return ret def uptodate(name, refresh=False, pkgs=None, **kwargs): ''' .. versionadded:: 2014.7.0 .. versionchanged:: 2018.3.0 Added support for the ``pkgin`` provider. Verify that the system is completely up to date. name The name has no functional value and is only used as a tracking reference refresh refresh the package database before checking for new upgrades pkgs list of packages to upgrade :param str cache_valid_time: This parameter sets the value in seconds after which cache marked as invalid, and cache update is necessary. This overwrite ``refresh`` parameter default behavior. In this case cache_valid_time is set, refresh will not take place for amount in seconds since last ``apt-get update`` executed on the system. .. 
note:: This parameter available only on Debian based distributions, and have no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 kwargs Any keyword arguments to pass through to ``pkg.upgrade``. .. versionadded:: 2015.5.0 ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'Failed to update'} if 'pkg.list_upgrades' not in __salt__: ret['comment'] = 'State pkg.uptodate is not available' return ret # emerge --update doesn't appear to support repo notation if 'fromrepo' in kwargs and __grains__['os_family'] == 'Gentoo': ret['comment'] = '\'fromrepo\' argument not supported on this platform' return ret if isinstance(refresh, bool): pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) try: packages = __salt__['pkg.list_upgrades'](refresh=refresh, **kwargs) expected = {pkgname: {'new': pkgver, 'old': __salt__['pkg.version'](pkgname, **kwargs)} for pkgname, pkgver in six.iteritems(packages)} if isinstance(pkgs, list): packages = [pkg for pkg in packages if pkg in pkgs] expected = {pkgname: pkgver for pkgname, pkgver in six.iteritems(expected) if pkgname in pkgs} except Exception as exc: ret['comment'] = six.text_type(exc) return ret else: ret['comment'] = 'refresh must be either True or False' return ret if not packages: ret['comment'] = 'System is already up-to-date' ret['result'] = True return ret elif __opts__['test']: ret['comment'] = 'System update will be performed' ret['changes'] = expected ret['result'] = None return ret try: ret['changes'] = __salt__['pkg.upgrade'](refresh=refresh, pkgs=pkgs, **kwargs) except CommandExecutionError as exc: if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while updating ' 'packages: {0}'.format(exc)) return ret # If a package list was provided, ensure those packages were updated missing = [] if isinstance(pkgs, list): missing = [pkg for pkg in six.iterkeys(expected) if pkg not in ret['changes']] if missing: ret['comment'] = 'The following package(s) failed to update: {0}'.format(', '.join(missing)) ret['result'] = False else: ret['comment'] = 'Upgrade ran successfully' ret['result'] = True return ret def group_installed(name, skip=None, include=None, **kwargs): ''' .. versionadded:: 2015.8.0 .. versionchanged:: 2016.11.0 Added support in :mod:`pacman <salt.modules.pacman>` Ensure that an entire package group is installed. This state is currently only supported for the :mod:`yum <salt.modules.yumpkg>` and :mod:`pacman <salt.modules.pacman>` package managers. skip Packages that would normally be installed by the package group ("default" packages), which should not be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - skip: - piranha include Packages which are included in a group, which would not normally be installed by a ``yum groupinstall`` ("optional" packages). Note that this will not enforce group membership; if you include packages which are not members of the specified groups, they will still be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - include: - haproxy .. versionchanged:: 2016.3.0 This option can no longer be passed as a comma-separated list, it must now be passed as a list (as shown in the above example). .. note:: Because this is essentially a wrapper around :py:func:`pkg.install <salt.modules.yumpkg.install>`, any argument which can be passed to pkg.install may also be included here, and it will be passed on to the call to :py:func:`pkg.install <salt.modules.yumpkg.install>`. 
''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} if 'pkg.group_diff' not in __salt__: ret['comment'] = 'pkg.group_install not available for this platform' return ret if skip is None: skip = [] else: if not isinstance(skip, list): ret['comment'] = 'skip must be formatted as a list' return ret for idx, item in enumerate(skip): if not isinstance(item, six.string_types): skip[idx] = six.text_type(item) if include is None: include = [] else: if not isinstance(include, list): ret['comment'] = 'include must be formatted as a list' return ret for idx, item in enumerate(include): if not isinstance(item, six.string_types): include[idx] = six.text_type(item) try: diff = __salt__['pkg.group_diff'](name) except CommandExecutionError as err: ret['comment'] = ('An error was encountered while installing/updating ' 'group \'{0}\': {1}.'.format(name, err)) return ret mandatory = diff['mandatory']['installed'] + \ diff['mandatory']['not installed'] invalid_skip = [x for x in mandatory if x in skip] if invalid_skip: ret['comment'] = ( 'The following mandatory packages cannot be skipped: {0}' .format(', '.join(invalid_skip)) ) return ret targets = diff['mandatory']['not installed'] targets.extend([x for x in diff['default']['not installed'] if x not in skip]) targets.extend(include) if not targets: ret['result'] = True ret['comment'] = 'Group \'{0}\' is already installed'.format(name) return ret partially_installed = diff['mandatory']['installed'] \ or diff['default']['installed'] \ or diff['optional']['installed'] if __opts__['test']: ret['result'] = None if partially_installed: ret['comment'] = ( 'Group \'{0}\' is partially installed and will be updated' .format(name) ) else: ret['comment'] = 'Group \'{0}\' will be installed'.format(name) return ret try: ret['changes'] = __salt__['pkg.install'](pkgs=targets, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while ' 'installing/updating group \'{0}\': {1}' .format(name, exc)) return ret failed = [x for x in targets if x not in __salt__['pkg.list_pkgs'](**kwargs)] if failed: ret['comment'] = ( 'Failed to install the following packages: {0}' .format(', '.join(failed)) ) return ret ret['result'] = True ret['comment'] = 'Group \'{0}\' was {1}'.format( name, 'updated' if partially_installed else 'installed' ) return ret def mod_init(low): ''' Set a flag to tell the install functions to refresh the package database. This ensures that the package database is refreshed only once during a state run significantly improving the speed of package management during a state run. It sets a flag for a number of reasons, primarily due to timeline logic. When originally setting up the mod_init for pkg a number of corner cases arose with different package managers and how they refresh package data. It also runs the "ex_mod_init" from the package manager module that is currently loaded. The "ex_mod_init" is expected to work as a normal "mod_init" function. .. 
seealso:: :py:func:`salt.modules.ebuild.ex_mod_init` ''' ret = True if 'pkg.ex_mod_init' in __salt__: ret = __salt__['pkg.ex_mod_init'](low) if low['fun'] == 'installed' or low['fun'] == 'latest': salt.utils.pkg.write_rtag(__opts__) return ret return False def mod_aggregate(low, chunks, running): ''' The mod_aggregate function which looks up all packages in the available low chunks and merges them into a single pkgs ref in the present low data ''' pkgs = [] pkg_type = None agg_enabled = [ 'installed', 'latest', 'removed', 'purged', ] if low.get('fun') not in agg_enabled: return low for chunk in chunks: tag = __utils__['state.gen_tag'](chunk) if tag in running: # Already ran the pkg state, skip aggregation continue if chunk.get('state') == 'pkg': if '__agg__' in chunk: continue # Check for the same function if chunk.get('fun') != low.get('fun'): continue # Check for the same repo if chunk.get('fromrepo') != low.get('fromrepo'): continue # Check first if 'sources' was passed so we don't aggregate pkgs # and sources together. if 'sources' in chunk: if pkg_type is None: pkg_type = 'sources' if pkg_type == 'sources': pkgs.extend(chunk['sources']) chunk['__agg__'] = True else: if pkg_type is None: pkg_type = 'pkgs' if pkg_type == 'pkgs': # Pull out the pkg names! if 'pkgs' in chunk: pkgs.extend(chunk['pkgs']) chunk['__agg__'] = True elif 'name' in chunk: version = chunk.pop('version', None) if version is not None: pkgs.append({chunk['name']: version}) else: pkgs.append(chunk['name']) chunk['__agg__'] = True if pkg_type is not None and pkgs: if pkg_type in low: low[pkg_type].extend(pkgs) else: low[pkg_type] = pkgs return low def mod_watch(name, **kwargs): ''' Install/reinstall a package based on a watch requisite .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered. 
''' sfun = kwargs.pop('sfun', None) mapfun = {'purged': purged, 'latest': latest, 'removed': removed, 'installed': installed} if sfun in mapfun: return mapfun[sfun](name, **kwargs) return {'name': name, 'changes': {}, 'comment': 'pkg.{0} does not work with the watch requisite'.format(sfun), 'result': False}
saltstack/salt
salt/states/pkg.py
_find_remove_targets
python
def _find_remove_targets(name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.removed and discover what packages need to be removed. Return a dict of packages to remove. ''' if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if pkgs: to_remove = _repack_pkgs(pkgs, normalize=normalize) if not to_remove: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. See ' 'minion log.'} else: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_remove = {_normalize_name(name): version} version_spec = False # Find out which packages will be targeted in the call to pkg.remove # Check current versions against specified versions targets = [] problems = [] for pkgname, pkgver in six.iteritems(to_remove): # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names origin = bool(re.search('/', pkgname)) if __grains__['os'] == 'FreeBSD' and origin: cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == pkgname] else: cver = cur_pkgs.get(pkgname, []) # Package not installed, no need to remove if not cver: continue # No version specified and pkg is installed elif __salt__['pkg_resource.version_clean'](pkgver) is None: targets.append(pkgname) continue version_spec = True try: if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch): targets.append(pkgname) else: log.debug( 'Current version (%s) did not match desired version ' 'specification (%s), will not remove', cver, pkgver ) except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already absent msg = 'All specified packages{0} are already absent'.format( ' (matching specified versions)' if version_spec 
else '' ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets
Inspect the arguments to pkg.removed and discover what packages need to be removed. Return a dict of packages to remove.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pkg.py#L415-L493
null
# -*- coding: utf-8 -*- ''' Installation of packages using OS package managers such as yum or apt-get ========================================================================= .. note:: On minions running systemd>=205, as of version 2015.8.12, 2016.3.3, and 2016.11.0, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing the package manager commands spawned by Salt, when Salt updates itself (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.use_scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Salt can manage software packages via the pkg state module, packages can be set up to be installed, latest, removed and purged. Package management declarations are typically rather simple: .. code-block:: yaml vim: pkg.installed A more involved example involves pulling from a custom repository. .. code-block:: yaml base: pkgrepo.managed: - humanname: Logstash PPA - name: ppa:wolfnet/logstash - dist: precise - file: /etc/apt/sources.list.d/logstash.list - keyid: 28B04E4A - keyserver: keyserver.ubuntu.com logstash: pkg.installed: - fromrepo: ppa:wolfnet/logstash Multiple packages can also be installed with the use of the pkgs state module .. code-block:: yaml dotdeb.repo: pkgrepo.managed: - humanname: Dotdeb - name: deb http://packages.dotdeb.org wheezy-php55 all - dist: wheezy-php55 - file: /etc/apt/sources.list.d/dotbeb.list - keyid: 89DF5277 - keyserver: keys.gnupg.net - refresh_db: true php.packages: pkg.installed: - fromrepo: wheezy-php55 - pkgs: - php5-fpm - php5-cli - php5-curl .. 
warning:: Package names are currently case-sensitive. If the minion is using a package manager which is not case-sensitive (such as :mod:`pkgng <salt.modules.pkgng>`), then this state will fail if the proper case is not used. This will be addressed in a future release of Salt. ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import fnmatch import logging import os import re # Import Salt libs import salt.utils.pkg import salt.utils.platform import salt.utils.versions from salt.output import nested from salt.utils.functools import namespaced_function as _namespaced_function from salt.utils.odict import OrderedDict as _OrderedDict from salt.exceptions import ( CommandExecutionError, MinionError, SaltInvocationError ) from salt.modules.pkg_resource import _repack_pkgs # Import 3rd-party libs from salt.ext import six # pylint: disable=invalid-name _repack_pkgs = _namespaced_function(_repack_pkgs, globals()) if salt.utils.platform.is_windows(): # pylint: disable=import-error,no-name-in-module,unused-import from salt.ext.six.moves.urllib.parse import urlparse as _urlparse from salt.exceptions import SaltRenderError import collections import datetime import errno import time from functools import cmp_to_key # pylint: disable=import-error # pylint: enable=unused-import from salt.modules.win_pkg import _get_package_info from salt.modules.win_pkg import get_repo_data from salt.modules.win_pkg import _get_repo_details from salt.modules.win_pkg import _refresh_db_conditional from salt.modules.win_pkg import refresh_db from salt.modules.win_pkg import genrepo from salt.modules.win_pkg import _repo_process_pkg_sls from salt.modules.win_pkg import _get_latest_pkg_version from salt.modules.win_pkg import _reverse_cmp_pkg_versions _get_package_info = _namespaced_function(_get_package_info, globals()) get_repo_data = _namespaced_function(get_repo_data, globals()) _get_repo_details = \ _namespaced_function(_get_repo_details, globals()) 
_refresh_db_conditional = \ _namespaced_function(_refresh_db_conditional, globals()) refresh_db = _namespaced_function(refresh_db, globals()) genrepo = _namespaced_function(genrepo, globals()) _repo_process_pkg_sls = \ _namespaced_function(_repo_process_pkg_sls, globals()) _get_latest_pkg_version = \ _namespaced_function(_get_latest_pkg_version, globals()) _reverse_cmp_pkg_versions = \ _namespaced_function(_reverse_cmp_pkg_versions, globals()) # The following imports are used by the namespaced win_pkg funcs # and need to be included in their globals. # pylint: disable=import-error,unused-import from salt.utils.versions import LooseVersion # pylint: enable=import-error,unused-import # pylint: enable=invalid-name log = logging.getLogger(__name__) def __virtual__(): ''' Only make these states available if a pkg provider has been detected or assigned for this minion ''' return 'pkg.install' in __salt__ def _get_comparison_spec(pkgver): ''' Return a tuple containing the comparison operator and the version. If no comparison operator was passed, the comparison is assumed to be an "equals" comparison, and "==" will be the operator returned. ''' oper, verstr = salt.utils.pkg.split_comparison(pkgver.strip()) if oper in ('=', ''): oper = '==' return oper, verstr def _parse_version_string(version_conditions_string): ''' Returns a list of two-tuples containing (operator, version). ''' result = [] version_conditions_string = version_conditions_string.strip() if not version_conditions_string: return result for version_condition in version_conditions_string.split(','): operator_and_version = _get_comparison_spec(version_condition) result.append(operator_and_version) return result def _fulfills_version_string(installed_versions, version_conditions_string, ignore_epoch=False, allow_updates=False): ''' Returns True if any of the installed versions match the specified version conditions, otherwise returns False. 

    installed_versions
        The installed versions

    version_conditions_string
        The string containing all version conditions. E.G.
        1.2.3-4
        >=1.2.3-4
        >=1.2.3-4, <2.3.4-5
        >=1.2.3-4, <2.3.4-5, !=1.2.4-1

    ignore_epoch : False
        When a package version contains an non-zero epoch (e.g.
        ``1:3.14.159-2.el7``, and a specific version of a package is desired,
        set this option to ``True`` to ignore the epoch when comparing
        versions.

    allow_updates : False
        Allow the package to be updated outside Salt's control (e.g. auto
        updates on Windows). This means a package on the Minion can have a
        newer version than the latest available in the repository without
        enforcing a re-installation of the package.
        (Only applicable if only one strict version condition is specified E.G.
        version: 2.0.6~ubuntu3)
    '''
    version_conditions = _parse_version_string(version_conditions_string)
    for installed_version in installed_versions:
        # A single installed version must satisfy ALL comma-separated
        # conditions; any one installed version passing the full set is an
        # overall match.
        fullfills_all = True
        for operator, version_string in version_conditions:
            # With allow_updates and a single exact-match condition, relax
            # "==" to ">=" so newer externally-updated versions still pass.
            if allow_updates and len(version_conditions) == 1 and operator == '==':
                operator = '>='
            fullfills_all = fullfills_all and _fulfills_version_spec([installed_version], operator, version_string, ignore_epoch=ignore_epoch)
        if fullfills_all:
            return True
    return False


def _fulfills_version_spec(versions, oper, desired_version,
                           ignore_epoch=False):
    '''
    Returns True if any of the installed versions match the specified version,
    otherwise returns False
    '''
    # Use the platform pkg module's comparison function when it provides one
    cmp_func = __salt__.get('pkg.version_cmp')
    # stripping "with_origin" dict wrapper
    if salt.utils.platform.is_freebsd():
        if isinstance(versions, dict) and 'version' in versions:
            versions = versions['version']
    for ver in versions:
        # "==" additionally supports shell-style wildcards via fnmatch
        if (oper == '==' and fnmatch.fnmatch(ver, desired_version)) \
                or salt.utils.versions.compare(ver1=ver,
                                               oper=oper,
                                               ver2=desired_version,
                                               cmp_func=cmp_func,
                                               ignore_epoch=ignore_epoch):
            return True
    return False


def _find_unpurge_targets(desired, **kwargs):
    '''
    Find packages which are marked to be purged but can't yet be removed
    because they are dependencies for
 other installed packages. These are the
    packages which will need to be 'unpurged' because they are part of
    pkg.installed states. This really just applies to Debian-based Linuxes.
    '''
    return [
        x for x in desired
        if x in __salt__['pkg.list_pkgs'](purge_desired=True, **kwargs)
    ]


def _find_download_targets(name=None,
                           version=None,
                           pkgs=None,
                           normalize=True,
                           skip_suggestions=False,
                           ignore_epoch=False,
                           **kwargs):
    '''
    Inspect the arguments to pkg.downloaded and discover what packages need to
    be downloaded. Return a dict of packages to download.
    '''
    cur_pkgs = __salt__['pkg.list_downloaded'](**kwargs)
    if pkgs:
        to_download = _repack_pkgs(pkgs, normalize=normalize)

        if not to_download:
            # Badly-formatted SLS
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'Invalidly formatted pkgs parameter. See '
                               'minion log.'}
    else:
        # Single-package invocation: optionally normalize the name via the
        # platform's pkg module (falls back to a no-op lambda).
        if normalize:
            _normalize_name = \
                __salt__.get('pkg.normalize_name', lambda pkgname: pkgname)
            to_download = {_normalize_name(name): version}
        else:
            to_download = {name: version}

        cver = cur_pkgs.get(name, {})
        if name in to_download:
            # Package already downloaded, no need to download again
            if cver and version in cver:
                return {'name': name,
                        'changes': {},
                        'result': True,
                        'comment': 'Version {0} of package \'{1}\' is already '
                                   'downloaded'.format(version, name)}

            # if cver is not an empty string, the package is already downloaded
            elif cver and version is None:
                # The package is downloaded
                return {'name': name,
                        'changes': {},
                        'result': True,
                        'comment': 'Package {0} is already '
                                   'downloaded'.format(name)}

    version_spec = False
    if not skip_suggestions:
        # NOTE(review): _preflight_check declares ``fromrepo`` positionally;
        # it is expected to arrive via **kwargs here -- confirm with callers.
        try:
            problems = _preflight_check(to_download, **kwargs)
        except CommandExecutionError:
            pass
        else:
            comments = []
            if problems.get('no_suggest'):
                comments.append(
                    'The following package(s) were not found, and no '
                    'possible matches were found in the package db: '
                    '{0}'.format(
                        ', '.join(sorted(problems['no_suggest']))
                    )
                )
            if problems.get('suggest'):
                for pkgname, suggestions in \
                        six.iteritems(problems['suggest']):
                    comments.append(
                        'Package \'{0}\' not found (possible matches: '
                        '{1})'.format(pkgname, ', '.join(suggestions))
                    )
            if comments:
                if len(comments) > 1:
                    comments.append('')
                return {'name': name,
                        'changes': {},
                        'result': False,
                        'comment': '. '.join(comments).rstrip()}

    # Find out which packages will be targeted in the call to pkg.download
    # Check current downloaded versions against specified versions
    targets = {}
    problems = []
    for pkgname, pkgver in six.iteritems(to_download):
        cver = cur_pkgs.get(pkgname, {})
        # Package not yet downloaded, so add to targets
        if not cver:
            targets[pkgname] = pkgver
            continue
        # No version specified but package is already downloaded
        elif cver and not pkgver:
            continue

        version_spec = True
        try:
            if not _fulfills_version_string(cver.keys(), pkgver,
                                            ignore_epoch=ignore_epoch):
                targets[pkgname] = pkgver
        except CommandExecutionError as exc:
            problems.append(exc.strerror)
            continue

    if problems:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': ' '.join(problems)}

    if not targets:
        # All specified packages are already downloaded
        msg = (
            'All specified packages{0} are already downloaded'
            .format(' (matching specified versions)' if version_spec else '')
        )
        return {'name': name,
                'changes': {},
                'result': True,
                'comment': msg}

    return targets


def _find_advisory_targets(name=None,
                           advisory_ids=None,
                           **kwargs):
    '''
    Inspect the arguments to pkg.patch_installed and discover what advisory
    patches need to be installed. Return a dict of advisory patches to install.
    '''
    cur_patches = __salt__['pkg.list_installed_patches'](**kwargs)
    if advisory_ids:
        to_download = advisory_ids
    else:
        # Single-advisory invocation: the state name is the advisory id
        to_download = [name]
        if cur_patches.get(name, {}):
            # Advisory patch already installed, no need to install it again
            return {'name': name,
                    'changes': {},
                    'result': True,
                    'comment': 'Advisory patch {0} is already '
                               'installed'.format(name)}

    # Find out which advisory patches will be targeted in the call to pkg.install
    targets = []
    for patch_name in to_download:
        cver = cur_patches.get(patch_name, {})
        # Advisory patch not yet installed, so add to targets
        if not cver:
            targets.append(patch_name)
            continue

    if not targets:
        # All specified advisory patches are already installed
        msg = ('All specified advisory patches are already installed')
        return {'name': name,
                'changes': {},
                'result': True,
                'comment': msg}

    return targets


def _find_install_targets(name=None,
                          version=None,
                          pkgs=None,
                          sources=None,
                          skip_suggestions=False,
                          pkg_verify=False,
                          normalize=True,
                          ignore_epoch=False,
                          reinstall=False,
                          refresh=False,
                          **kwargs):
    '''
    Inspect the arguments to pkg.installed and discover what packages need to
    be installed.
    Return a dict of desired packages
    '''
    was_refreshed = False

    if all((pkgs, sources)):
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'Only one of "pkgs" and "sources" is permitted.'}

    # dict for packages that fail pkg.verify and their altered files
    altered_files = {}
    # Get the ignore_types list if any from the pkg_verify argument
    if isinstance(pkg_verify, list) \
            and any(x.get('ignore_types') is not None
                    for x in pkg_verify
                    if isinstance(x, _OrderedDict)
                    and 'ignore_types' in x):
        ignore_types = next(x.get('ignore_types')
                            for x in pkg_verify
                            if 'ignore_types' in x)
    else:
        ignore_types = []

    # Get the verify_options list if any from the pkg_verify argument
    if isinstance(pkg_verify, list) \
            and any(x.get('verify_options') is not None
                    for x in pkg_verify
                    if isinstance(x, _OrderedDict)
                    and 'verify_options' in x):
        verify_options = next(x.get('verify_options')
                              for x in pkg_verify
                              if 'verify_options' in x)
    else:
        verify_options = []

    # Ask FreeBSD's pkg module to include the port origin for each package
    if __grains__['os'] == 'FreeBSD':
        kwargs['with_origin'] = True

    if salt.utils.platform.is_windows():
        # Windows requires a refresh to establish a pkg db if refresh=True, so
        # add it to the kwargs.
        kwargs['refresh'] = refresh

    resolve_capabilities = kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__
    try:
        cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
        cur_prov = resolve_capabilities and __salt__['pkg.list_provides'](**kwargs) or dict()
    except CommandExecutionError as exc:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': exc.strerror}

    if salt.utils.platform.is_windows() and kwargs.pop('refresh', False):
        # We already refreshed when we called pkg.list_pkgs
        was_refreshed = True
        refresh = False

    if any((pkgs, sources)):
        if pkgs:
            desired = _repack_pkgs(pkgs, normalize=normalize)
        elif sources:
            desired = __salt__['pkg_resource.pack_sources'](
                sources,
                normalize=normalize,
            )

        if not desired:
            # Badly-formatted SLS
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'Invalidly formatted \'{0}\' parameter. See '
                               'minion log.'.format('pkgs' if pkgs
                                                    else 'sources')}
        to_unpurge = _find_unpurge_targets(desired, **kwargs)
    else:
        if salt.utils.platform.is_windows():
            pkginfo = _get_package_info(name, saltenv=kwargs['saltenv'])
            if not pkginfo:
                return {'name': name,
                        'changes': {},
                        'result': False,
                        'comment': 'Package {0} not found in the '
                                   'repository.'.format(name)}
            if version is None:
                version = _get_latest_pkg_version(pkginfo)

        if normalize:
            _normalize_name = \
                __salt__.get('pkg.normalize_name', lambda pkgname: pkgname)
            desired = {_normalize_name(name): version}
        else:
            desired = {name: version}

        to_unpurge = _find_unpurge_targets(desired, **kwargs)

        # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names
        origin = bool(re.search('/', name))

        if __grains__['os'] == 'FreeBSD' and origin:
            cver = [k for k, v in six.iteritems(cur_pkgs)
                    if v['origin'] == name]
        else:
            cver = cur_pkgs.get(name, [])

        if name not in to_unpurge:
            if version and version in cver \
                    and not reinstall \
                    and not pkg_verify:
                # The package is installed and is the correct version
                return {'name': name,
                        'changes': {},
                        'result': True,
                        'comment': 'Version {0} of package \'{1}\' is already '
                                   'installed'.format(version, name)}

            # if cver is not an empty string, the package is already installed
            elif cver and version is None \
                    and not reinstall \
                    and not pkg_verify:
                # The package is installed
                return {'name': name,
                        'changes': {},
                        'result': True,
                        'comment': 'Package {0} is already '
                                   'installed'.format(name)}

    version_spec = False
    if not sources:
        # Check for alternate package names if strict processing is not
        # enforced. Takes extra time. Disable for improved performance
        if not skip_suggestions:
            # Perform platform-specific pre-flight checks
            not_installed = dict([
                (name, version)
                for name, version in desired.items()
                if not (name in cur_pkgs and
                        (version is None or
                         _fulfills_version_string(cur_pkgs[name], version)))
            ])
            if not_installed:
                try:
                    problems = _preflight_check(not_installed, **kwargs)
                except CommandExecutionError:
                    pass
                else:
                    comments = []
                    if problems.get('no_suggest'):
                        comments.append(
                            'The following package(s) were not found, and no '
                            'possible matches were found in the package db: '
                            '{0}'.format(
                                ', '.join(sorted(problems['no_suggest']))
                            )
                        )
                    if problems.get('suggest'):
                        for pkgname, suggestions in \
                                six.iteritems(problems['suggest']):
                            comments.append(
                                'Package \'{0}\' not found (possible matches: '
                                '{1})'.format(pkgname, ', '.join(suggestions))
                            )
                    if comments:
                        if len(comments) > 1:
                            comments.append('')
                        return {'name': name,
                                'changes': {},
                                'result': False,
                                'comment': '. '.join(comments).rstrip()}

    # Resolve the latest package version for any packages with "latest" in the
    # package version
    wants_latest = [] \
        if sources \
        else [x for x, y in six.iteritems(desired) if y == 'latest']
    if wants_latest:
        resolved_latest = __salt__['pkg.latest_version'](*wants_latest,
                                                         refresh=refresh,
                                                         **kwargs)
        # pkg.latest_version returns a bare string for a single package
        if len(wants_latest) == 1:
            resolved_latest = {wants_latest[0]: resolved_latest}
        if refresh:
            was_refreshed = True
            refresh = False

        # pkg.latest_version returns an empty string when the package is
        # up-to-date. So check the currently-installed packages. If found, the
        # resolved latest version will be the currently installed one from
        # cur_pkgs. If not found, then the package doesn't exist and the
        # resolved latest version will be None.
        for key in resolved_latest:
            if not resolved_latest[key]:
                if key in cur_pkgs:
                    resolved_latest[key] = cur_pkgs[key][-1]
                else:
                    resolved_latest[key] = None
        # Update the desired versions with the ones we resolved
        desired.update(resolved_latest)

    # Find out which packages will be targeted in the call to pkg.install
    targets = {}
    to_reinstall = {}
    problems = []
    warnings = []
    failed_verify = False
    for package_name, version_string in six.iteritems(desired):
        cver = cur_pkgs.get(package_name, [])
        if resolve_capabilities and not cver and package_name in cur_prov:
            # Fall back to the package that provides this capability
            cver = cur_pkgs.get(cur_prov.get(package_name)[0], [])

        # Package not yet installed, so add to targets
        if not cver:
            targets[package_name] = version_string
            continue

        if sources:
            if reinstall:
                to_reinstall[package_name] = version_string
                continue
            elif 'lowpkg.bin_pkg_info' not in __salt__:
                continue

            # Metadata parser is available, cache the file and derive the
            # package's name and version
            err = 'Unable to cache {0}: {1}'
            try:
                cached_path = __salt__['cp.cache_file'](version_string, saltenv=kwargs['saltenv'])
            except CommandExecutionError as exc:
                problems.append(err.format(version_string, exc))
                continue
            if not cached_path:
                problems.append(err.format(version_string, 'file not found'))
                continue
            elif not os.path.exists(cached_path):
                problems.append('{0} does not exist on minion'.format(version_string))
                continue
            source_info = __salt__['lowpkg.bin_pkg_info'](cached_path)
            if source_info is None:
                warnings.append('Failed to parse metadata for {0}'.format(version_string))
                continue
            else:
                verstr = source_info['version']
        else:
            verstr = version_string
            if reinstall:
                to_reinstall[package_name] = version_string
                continue
            if not __salt__['pkg_resource.check_extra_requirements'](package_name, version_string):
                targets[package_name] = version_string
                continue

            # No version specified and pkg is installed
            elif __salt__['pkg_resource.version_clean'](version_string) is None:
                if (not reinstall) and pkg_verify:
                    try:
                        verify_result = __salt__['pkg.verify'](
                            package_name,
                            ignore_types=ignore_types,
                            verify_options=verify_options,
                            **kwargs
                        )
                    except (CommandExecutionError, SaltInvocationError) as exc:
                        failed_verify = exc.strerror
                        continue
                    if verify_result:
                        to_reinstall[package_name] = version_string
                        altered_files[package_name] = verify_result
                continue

        version_fulfilled = False
        allow_updates = bool(not sources and kwargs.get('allow_updates'))
        try:
            version_fulfilled = _fulfills_version_string(cver, verstr, ignore_epoch=ignore_epoch, allow_updates=allow_updates)
        except CommandExecutionError as exc:
            problems.append(exc.strerror)
            continue

        # Compare desired version against installed version.
        version_spec = True
        if not version_fulfilled:
            if reinstall:
                to_reinstall[package_name] = version_string
            else:
                version_conditions = _parse_version_string(version_string)
                if pkg_verify and any(oper == '==' for oper, version in version_conditions):
                    try:
                        verify_result = __salt__['pkg.verify'](
                            package_name,
                            ignore_types=ignore_types,
                            verify_options=verify_options,
                            **kwargs)
                    except (CommandExecutionError, SaltInvocationError) as exc:
                        failed_verify = exc.strerror
                        continue
                    if verify_result:
                        to_reinstall[package_name] = version_string
                        altered_files[package_name] = verify_result
                else:
                    log.debug(
                        'Current version (%s) did not match desired version '
                        'specification (%s), adding to installation targets',
                        cver, version_string
                    )
                    targets[package_name] = version_string

    if failed_verify:
        problems.append(failed_verify)

    if problems:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': ' '.join(problems)}

    if not any((targets, to_unpurge, to_reinstall)):
        # All specified packages are installed
        msg = 'All specified packages are already installed{0}'
        msg = msg.format(
            ' and are at the desired version' if
            version_spec and not sources
            else ''
        )
        ret = {'name': name,
               'changes': {},
               'result': True,
               'comment': msg}
        if warnings:
            ret.setdefault('warnings', []).extend(warnings)
        return ret

    # Work remains: return everything the caller needs as a 7-tuple
    return (desired, targets, to_unpurge, to_reinstall, altered_files,
            warnings, was_refreshed)


def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None):
    '''
    Determine whether or not the installed packages match what was requested in
    the SLS file.
    '''
    ok = []
    failed = []
    if not new_caps:
        new_caps = dict()
    for pkgname, pkgver in desired.items():
        # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names.
        # Homebrew for Mac OSX does something similar with tap names
        # prefixing package names, separated with a slash.
        has_origin = '/' in pkgname

        if __grains__['os'] == 'FreeBSD' and has_origin:
            cver = [k for k, v in six.iteritems(new_pkgs) if v['origin'] == pkgname]
        elif __grains__['os'] == 'MacOS' and has_origin:
            cver = new_pkgs.get(pkgname, new_pkgs.get(pkgname.split('/')[-1]))
        elif __grains__['os'] == 'OpenBSD':
            cver = new_pkgs.get(pkgname.split('%')[0])
        elif __grains__['os_family'] == 'Debian':
            cver = new_pkgs.get(pkgname.split('=')[0])
        else:
            cver = new_pkgs.get(pkgname)

        # Fall back to whatever package provides the requested capability
        if not cver and pkgname in new_caps:
            cver = new_pkgs.get(new_caps.get(pkgname)[0])

        if not cver:
            failed.append(pkgname)
            continue
        elif pkgver == 'latest':
            ok.append(pkgname)
            continue
        elif not __salt__['pkg_resource.version_clean'](pkgver):
            ok.append(pkgname)
            continue
        elif pkgver.endswith("*") and cver[0].startswith(pkgver[:-1]):
            # Trailing glob: any installed version with this prefix passes
            ok.append(pkgname)
            continue
        if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch):
            ok.append(pkgname)
        else:
            failed.append(pkgname)
    return ok, failed


def _get_desired_pkg(name, desired):
    '''
    Helper function that retrieves and nicely formats the desired pkg (and
    version if specified) so that helpful information can be printed in the
    comment for the state.
    '''
    # Omit the "=" when there is no version, or when the desired version
    # string already carries its own comparison operator
    if not desired[name] or desired[name].startswith(('<', '>', '=')):
        oper = ''
    else:
        oper = '='
    return '{0}{1}{2}'.format(name, oper,
                              '' if not desired[name] else desired[name])


def _preflight_check(desired, fromrepo, **kwargs):
    '''
    Perform platform-specific checks on desired packages

    Note: callers in this module supply ``fromrepo`` via **kwargs.
    '''
    # Not every pkg provider implements check_db
    if 'pkg.check_db' not in __salt__:
        return {}
    ret = {'suggest': {}, 'no_suggest': []}
    pkginfo = __salt__['pkg.check_db'](
        *list(desired.keys()), fromrepo=fromrepo, **kwargs
    )
    for pkgname in pkginfo:
        if pkginfo[pkgname]['found'] is False:
            if pkginfo[pkgname]['suggestions']:
                ret['suggest'][pkgname] = pkginfo[pkgname]['suggestions']
            else:
                ret['no_suggest'].append(pkgname)
    return ret


def _nested_output(obj):
    '''
    Serialize obj and format for output
    '''
    # Hand this module's opts to the nested outputter before rendering
    nested.__opts__ = __opts__
    ret = nested.output(obj).rstrip()
    return ret


def _resolve_capabilities(pkgs, refresh=False, **kwargs):
    '''
    Resolve capabilities in ``pkgs`` and exchange them with real package
    names, when the result is distinct.
    This feature can be turned on while setting the parameter
    ``resolve_capabilities`` to True.

    Return the input dictionary with replaced capability names and as second
    return value a bool which says if a refresh needs to be run.

    In case of ``resolve_capabilities`` is False (disabled) or not supported
    by the implementation the input is returned unchanged.
    '''
    if not pkgs or 'pkg.resolve_capabilities' not in __salt__:
        return pkgs, refresh

    ret = __salt__['pkg.resolve_capabilities'](pkgs, refresh=refresh, **kwargs)
    # refresh was forwarded to pkg.resolve_capabilities above, so report
    # that no further refresh is needed
    return ret, False


def installed(
        name,
        version=None,
        refresh=None,
        fromrepo=None,
        skip_verify=False,
        skip_suggestions=False,
        pkgs=None,
        sources=None,
        allow_updates=False,
        pkg_verify=False,
        normalize=True,
        ignore_epoch=False,
        reinstall=False,
        update_holds=False,
        bypass_file=None,
        bypass_file_contains=None,
        **kwargs):
    '''
    Ensure that the package is installed, and that it is the correct version
    (if specified).

    :param str name:
        The name of the package to be installed.
This parameter is ignored if either "pkgs" or "sources" is used. Additionally, please note that this option can only be used to install packages from a software repository. To install a package file manually, use the "sources" option detailed below. :param str version: Install a specific version of a package. This option is ignored if "sources" is used. Currently, this option is supported for the following pkg providers: :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`pkgin <salt.modules.pkgin>`, :mod:`win_pkg <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`. The version number includes the release designation where applicable, to allow Salt to target a specific release of a given version. When in doubt, using the ``pkg.latest_version`` function for an uninstalled package will tell you the version available. .. code-block:: bash # salt myminion pkg.latest_version vim-enhanced myminion: 2:7.4.160-1.el7 .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the ``pkg.latest_version`` output above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. 
Also, while this function is not yet implemented for all pkg frontends, :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` will show all versions available in the various repositories for a given package, irrespective of whether or not it is installed. .. code-block:: bash # salt myminion pkg.list_repo_pkgs bash myminion: ---------- bash: - 4.2.46-21.el7_3 - 4.2.46-20.el7_2 This function was first added for :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` in 2014.1.0, and was expanded to :py:func:`Debian/Ubuntu <salt.modules.aptpkg.list_repo_pkgs>` and :py:func:`Arch Linux <salt.modules.pacman.list_repo_pkgs>`-based distros in the 2017.7.0 release. The version strings returned by either of these functions can be used as version specifiers in pkg states. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.installed: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 If the version given is the string ``latest``, the latest available package version will be installed à la ``pkg.latest``. **WILDCARD VERSIONS** As of the 2017.7.0 release, this state now supports wildcards in package versions for SUSE SLES/Leap/Tumbleweed, Debian/Ubuntu, RHEL/CentOS, Arch Linux, and their derivatives. Using wildcards can be useful for packages where the release name is built into the version in some way, such as for RHEL/CentOS which typically has version numbers like ``1.2.34-5.el7``. An example of the usage for this would be: .. code-block:: yaml mypkg: pkg.installed: - version: '1.2.34*' Keep in mind that using wildcard versions will result in a slower state run since Salt must gather the available versions of the specified packages and figure out which of them match the specified wildcard expression. :param bool refresh: This parameter controls whether or not the package repo database is updated prior to installing the requested package(s). 
If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before installing. If ``False``, the package database will *not* be refreshed before installing. If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - cache_valid_time: 300 - allow_updates: True - hold: False In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param str fromrepo: Specify a repository from which to install .. note:: Distros which use APT (Debian, Ubuntu, etc.) do not have a concept of repositories, in the same way as YUM-based distros do. When a source is added, it is assigned to a given release. Consider the following source configuration: .. code-block:: text deb http://ppa.launchpad.net/saltstack/salt/ubuntu precise main The packages provided by this source would be made available via the ``precise`` release, therefore ``fromrepo`` would need to be set to ``precise`` for Salt to install the package from this source. 
Having multiple sources in the same release may result in the default install candidate being newer than what is desired. If this is the case, the desired version must be specified using the ``version`` parameter. If the ``pkgs`` parameter is being used to install multiple packages in the same state, then instead of using ``version``, use the method of version specification described in the **Multiple Package Installation Options** section below. Running the shell command ``apt-cache policy pkgname`` on a minion can help elucidate the APT configuration and aid in properly configuring states: .. code-block:: bash root@saltmaster:~# salt ubuntu01 cmd.run 'apt-cache policy ffmpeg' ubuntu01: ffmpeg: Installed: (none) Candidate: 7:0.10.11-1~precise1 Version table: 7:0.10.11-1~precise1 0 500 http://ppa.launchpad.net/jon-severinsson/ffmpeg/ubuntu/ precise/main amd64 Packages 4:0.8.10-0ubuntu0.12.04.1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise-updates/main amd64 Packages 500 http://security.ubuntu.com/ubuntu/ precise-security/main amd64 Packages 4:0.8.1-0ubuntu1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise/main amd64 Packages The release is located directly after the source's URL. The actual release name is the part before the slash, so to install version **4:0.8.10-0ubuntu0.12.04.1** either ``precise-updates`` or ``precise-security`` could be used for the ``fromrepo`` value. :param bool skip_verify: Skip the GPG verification check for the package to be installed :param bool skip_suggestions: Force strict package naming. Disables lookup of package alternatives. .. versionadded:: 2014.1.1 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 :param bool allow_updates: Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). 
This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - allow_updates: True - hold: False :param bool pkg_verify: .. versionadded:: 2014.7.0 For requested packages that are already installed and would not be targeted for upgrade or downgrade, use pkg.verify to determine if any of the files installed by the package have been altered. If files have been altered, the reinstall option of pkg.install is used to force a reinstall. Types to ignore can be passed to pkg.verify. Additionally, ``verify_options`` can be used to modify further the behavior of pkg.verify. See examples below. Currently, this option is supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>`. Examples: .. code-block:: yaml httpd: pkg.installed: - version: 2.2.15-30.el6.centos - pkg_verify: True .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc - verify_options: - nodeps - nofiledigest :param list ignore_types: List of types to ignore when verifying the package .. versionadded:: 2014.7.0 :param list verify_options: List of additional options to pass when verifying the package. These options will be added to the ``rpm -V`` command, prepended with ``--`` (for example, when ``nodeps`` is passed in this option, ``rpm -V`` will be run with ``--nodeps``). .. versionadded:: 2016.11.0 :param bool normalize: Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml gpfs.gplbin-2.6.32-279.31.1.el6.x86_64: pkg.installed: - normalize: False :param bool ignore_epoch: When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.installed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the package would be installed, but the state would report as failed because the actual installed version would be ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 | **MULTIPLE PACKAGE INSTALLATION OPTIONS: (not supported in pkgng)** :param list pkgs: A list of packages to install from a software repository. All packages listed under ``pkgs`` will be installed via a single command. .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar - baz - hold: True ``NOTE:`` For :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`winrepo <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`, version numbers can be specified in the ``pkgs`` argument. For example: .. 
code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz Additionally, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`zypper <salt.modules.zypper>`, :mod:`yum/dnf <salt.modules.yumpkg>`, and :mod:`apt <salt.modules.aptpkg>` support the ``<``, ``<=``, ``>=``, and ``>`` operators for more control over what versions will be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: '>=1.2.3-4' - baz ``NOTE:`` When using comparison operators, the expression must be enclosed in quotes to avoid a YAML render error. With :mod:`ebuild <salt.modules.ebuild>` is also possible to specify a use flag list and/or if the given packages should be in package.accept_keywords file and/or the overlay from which you want the package to be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo: '~' - bar: '~>=1.2:slot::overlay[use,-otheruse]' - baz :param list sources: A list of packages to install, along with the source URI or local path from which to install each package. In the example below, ``foo``, ``bar``, ``baz``, etc. refer to the name of the package, as it would appear in the output of the ``pkg.version`` or ``pkg.list_pkgs`` salt CLI commands. .. code-block:: yaml mypkgs: pkg.installed: - sources: - foo: salt://rpms/foo.rpm - bar: http://somesite.org/bar.rpm - baz: ftp://someothersite.org/baz.rpm - qux: /minion/path/to/qux.rpm **PLATFORM-SPECIFIC ARGUMENTS** These are specific to each OS. If it does not apply to the execution module for your OS, it is ignored. :param bool hold: Force the package to be held at the current installed version. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2014.7.0 :param bool update_holds: If ``True``, and this function would update the package version, any packages which are being held will be temporarily unheld so that they can be updated. 
Otherwise, if this function attempts to update a held package, the held package(s) will be skipped and the state will fail. By default, this parameter is set to ``False``. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2016.11.0 :param list names: A list of packages to install from a software repository. Each package will be installed individually by the package manager. .. warning:: Unlike ``pkgs``, the ``names`` parameter cannot specify a version. In addition, it makes a separate call to the package management frontend to install each package, whereas ``pkgs`` makes just a single call. It is therefore recommended to use ``pkgs`` instead of ``names`` to install multiple packages, both for the additional features and the performance improvement that it brings. :param bool install_recommends: Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - install_recommends: False :param bool only_upgrade: Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. :param bool report_reboot_exit_codes: If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. 
code-block:: yaml ms vcpp installed: pkg.installed: - name: ms-vcpp - version: 10.0.40219 - report_reboot_exit_codes: False :param str bypass_file: If you wish to bypass the full package validation process, you can specify a file related to the installed pacakge as a way to validate the pacakge has already been installed. A good example would be a config file that is deployed with the package. Another bypass_file could be ``/run/salt-minon.pid``. .. code-block:: yaml install_ntp: pkg.installed: - name: ntp - bypass_file: /etc/ntp.conf The use case for this feature is when running salt at significant scale. Each state that has a requisite for a ``pkg.installed`` will have salt querying the package manager of the system. Compared to simple diff checks, querying the pacakge manager is a lengthy process. This feature is an attempt to reduce the run time of states. If only a config change is being made but you wish to keep all of the self resolving requisites this bypasses the lenghty cost of the package manager. The assumption is that if this file is present, the package should already be installed. :param str bypass_file_contains: This option can only be used in conjunction with the ``bypass_file`` option. It is to provide a second layer of validation before bypassing the ``pkg.installed`` process. .. code-block:: yaml install_ntp: pkg.installed: - name: ntp - bypass_file: /etc/ntp.conf - bypass_file_contains: version-20181218 The will have salt check to see if the file contains the specified string. If the value is found, the ``pkg.installed`` process will be bypassed under the assumption that two pieces of validation have passed and the package is already installed. .. warning:: Do not try and use ``{{ salt['pkg.version']('ntp') }}`` in a jinja template as part of your bypass_file_contains match. This will trigger a ``pkg.version`` lookup with the pacakge manager and negate any time saved by trying to use the bypass feature. 
:return: A dictionary containing the state of the software installation :rtype dict: .. note:: The ``pkg.installed`` state supports the usage of ``reload_modules``. This functionality allows you to force Salt to reload all modules. In many cases, Salt is clever enough to transparently reload the modules. For example, if you install a package, Salt reloads modules because some other module or state might require the package which was installed. However, there are some edge cases where this may not be the case, which is what ``reload_modules`` is meant to resolve. You should only use ``reload_modules`` if your ``pkg.installed`` does some sort of installation where if you do not reload the modules future items in your state which rely on the software being installed will fail. Please see the :ref:`Reloading Modules <reloading-modules>` documentation for more information. ''' if not pkgs and isinstance(pkgs, list): return {'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided'} # If just a name (and optionally a version) is passed, just pack them into # the pkgs argument. 
if name and not any((pkgs, sources)): if version: pkgs = [{name: version}] version = None else: pkgs = [name] kwargs['saltenv'] = __env__ refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if bypass_file is not None and bypass_file_contains is not None: if os.path.isfile(bypass_file): with salt.utils.fopen(bypass_file) as bypass_file_open: if bypass_file_contains in bypass_file_open.read(): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as {} was present in {}'.format(bypass_file_contains, bypass_file)} if bypass_file is not None and bypass_file_contains is None: if os.path.isfile(bypass_file): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as bypass_file {} was present'.format(bypass_file)} # check if capabilities should be checked and modify the requested packages # accordingly. if pkgs: pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) if not isinstance(pkg_verify, list): pkg_verify = pkg_verify is True if (pkg_verify or isinstance(pkg_verify, list)) \ and 'pkg.verify' not in __salt__: return {'name': name, 'changes': {}, 'result': False, 'comment': 'pkg.verify not implemented'} if not isinstance(version, six.string_types) and version is not None: version = six.text_type(version) kwargs['allow_updates'] = allow_updates result = _find_install_targets(name, version, pkgs, sources, fromrepo=fromrepo, skip_suggestions=skip_suggestions, pkg_verify=pkg_verify, normalize=normalize, ignore_epoch=ignore_epoch, reinstall=reinstall, refresh=refresh, **kwargs) try: (desired, targets, to_unpurge, to_reinstall, altered_files, warnings, was_refreshed) = result if was_refreshed: refresh = False except ValueError: # _find_install_targets() found no targets or encountered an error # check that the hold function is available if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( 
name=name, pkgs=pkgs, sources=sources ) except (CommandExecutionError, SaltInvocationError) as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': six.text_type(exc)} if 'result' in hold_ret and not hold_ret['result']: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] for i in modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] result['changes'][i['name']] = i['changes'] for i in not_modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] for i in failed_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] return result if to_unpurge and 'lowpkg.unpurge' not in __salt__: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'lowpkg.unpurge not implemented'} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret # Remove any targets not returned by _find_install_targets if pkgs: pkgs = [dict([(x, y)]) for x, y in six.iteritems(targets)] pkgs.extend([dict([(x, y)]) for x, y in six.iteritems(to_reinstall)]) elif sources: oldsources = sources sources = [x for x in oldsources if next(iter(list(x.keys()))) in targets] sources.extend([x for x in oldsources if next(iter(list(x.keys()))) in to_reinstall]) comment = [] if __opts__['test']: if targets: if sources: summary = ', '.join(targets) else: summary = ', '.join([_get_desired_pkg(x, targets) for x in targets]) comment.append('The following packages would be ' 'installed/updated: {0}'.format(summary)) if to_unpurge: comment.append( 'The following packages would have their selection 
status ' 'changed from \'purge\' to \'install\': {0}' .format(', '.join(to_unpurge)) ) if to_reinstall: # Add a comment for each package in to_reinstall with its # pkg.verify output if reinstall: reinstall_targets = [] for reinstall_pkg in to_reinstall: if sources: reinstall_targets.append(reinstall_pkg) else: reinstall_targets.append( _get_desired_pkg(reinstall_pkg, to_reinstall) ) msg = 'The following packages would be reinstalled: ' msg += ', '.join(reinstall_targets) comment.append(msg) else: for reinstall_pkg in to_reinstall: if sources: pkgstr = reinstall_pkg else: pkgstr = _get_desired_pkg(reinstall_pkg, to_reinstall) comment.append( 'Package \'{0}\' would be reinstalled because the ' 'following files have been altered:'.format(pkgstr) ) comment.append( _nested_output(altered_files[reinstall_pkg]) ) ret = {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret changes = {'installed': {}} modified_hold = None not_modified_hold = None failed_hold = None if targets or to_reinstall: try: pkg_ret = __salt__['pkg.install'](name=None, refresh=refresh, version=version, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=pkgs, sources=sources, reinstall=bool(to_reinstall), normalize=normalize, update_holds=update_holds, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while installing ' 'package(s): {0}'.format(exc)) if warnings: ret.setdefault('warnings', []).extend(warnings) return ret if refresh: refresh = False if isinstance(pkg_ret, dict): changes['installed'].update(pkg_ret) elif isinstance(pkg_ret, six.string_types): comment.append(pkg_ret) # Code below will be looking for a dictionary. If this is a string # it means that there was an exception raised and that no packages # changed, so now that we have added this error to the comments we # set this to an empty dictionary so that the code below which # checks reinstall targets works. pkg_ret = {} if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( name=name, pkgs=desired ) except (CommandExecutionError, SaltInvocationError) as exc: comment.append(six.text_type(exc)) ret = {'name': name, 'changes': changes, 'result': False, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: if 'result' in hold_ret and not hold_ret['result']: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] if to_unpurge: changes['purge_desired'] = __salt__['lowpkg.unpurge'](*to_unpurge) # Analyze pkg.install results for packages in targets if sources: modified = [x for x in changes['installed'] if x in targets] not_modified = [x for x in desired if x not in targets 
and x not in to_reinstall] failed = [x for x in targets if x not in modified] else: if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True new_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__: new_caps = __salt__['pkg.list_provides'](**kwargs) else: new_caps = {} ok, failed = _verify_install(desired, new_pkgs, ignore_epoch=ignore_epoch, new_caps=new_caps) modified = [x for x in ok if x in targets] not_modified = [x for x in ok if x not in targets and x not in to_reinstall] failed = [x for x in failed if x in targets] # If there was nothing unpurged, just set the changes dict to the contents # of changes['installed']. if not changes.get('purge_desired'): changes = changes['installed'] if modified: if sources: summary = ', '.join(modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in modified]) if len(summary) < 20: comment.append('The following packages were installed/updated: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} installed/updated.'.format( len(modified), 's' if len(modified) > 1 else '', 'were' if len(modified) > 1 else 'was' ) ) if modified_hold: for i in modified_hold: change_name = i['name'] if change_name in changes: comment.append(i['comment']) if changes[change_name]['new']: changes[change_name]['new'] += '\n' changes[change_name]['new'] += '{0}'.format(i['changes']['new']) if changes[change_name]['old']: changes[change_name]['old'] += '\n' changes[change_name]['old'] += '{0}'.format(i['changes']['old']) else: comment.append(i['comment']) changes[change_name] = {} changes[change_name]['new'] = '{0}'.format(i['changes']['new']) # Any requested packages that were not targeted for install or reinstall if not_modified: if sources: summary = ', '.join(not_modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in not_modified]) if len(not_modified) <= 20: comment.append('The following 
packages were already installed: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} already installed'.format( len(not_modified), 's' if len(not_modified) > 1 else '', 'were' if len(not_modified) > 1 else 'was' ) ) if not_modified_hold: for i in not_modified_hold: comment.append(i['comment']) result = True if failed: if sources: summary = ', '.join(failed) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in failed]) comment.insert(0, 'The following packages failed to ' 'install/update: {0}'.format(summary)) result = False if failed_hold: for i in failed_hold: comment.append(i['comment']) result = False # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] # Rerun pkg.verify for packages in to_reinstall to determine failed modified = [] failed = [] for reinstall_pkg in to_reinstall: if reinstall: if reinstall_pkg in pkg_ret: modified.append(reinstall_pkg) else: failed.append(reinstall_pkg) elif pkg_verify: # No need to wrap this in a try/except because we would already # have caught invalid arguments earlier. 
verify_result = __salt__['pkg.verify'](reinstall_pkg, ignore_types=ignore_types, verify_options=verify_options, **kwargs) if verify_result: failed.append(reinstall_pkg) altered_files[reinstall_pkg] = verify_result else: modified.append(reinstall_pkg) if modified: # Add a comment for each package in modified with its pkg.verify output for modified_pkg in modified: if sources: pkgstr = modified_pkg else: pkgstr = _get_desired_pkg(modified_pkg, desired) msg = 'Package {0} was reinstalled.'.format(pkgstr) if modified_pkg in altered_files: msg += ' The following files were remediated:' comment.append(msg) comment.append(_nested_output(altered_files[modified_pkg])) else: comment.append(msg) if failed: # Add a comment for each package in failed with its pkg.verify output for failed_pkg in failed: if sources: pkgstr = failed_pkg else: pkgstr = _get_desired_pkg(failed_pkg, desired) msg = ('Reinstall was not successful for package {0}.' .format(pkgstr)) if failed_pkg in altered_files: msg += ' The following files could not be remediated:' comment.append(msg) comment.append(_nested_output(altered_files[failed_pkg])) else: comment.append(msg) result = False ret = {'name': name, 'changes': changes, 'result': result, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret def downloaded(name, version=None, pkgs=None, fromrepo=None, ignore_epoch=None, **kwargs): ''' .. versionadded:: 2017.7.0 Ensure that the package is downloaded, and that it is the correct version (if specified). Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` :param str name: The name of the package to be downloaded. This parameter is ignored if either "pkgs" is used. Additionally, please note that this option can only be used to download packages from a software repository. :param str version: Download a specific version of a package. .. 
def downloaded(name,
               version=None,
               pkgs=None,
               fromrepo=None,
               ignore_epoch=None,
               **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that the package is downloaded, and that it is the correct version
    (if specified).

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    :param str name:
        The name of the package to be downloaded. This parameter is ignored
        if "pkgs" is used. Additionally, please note that this option can
        only be used to download packages from a software repository.

    :param str version:
        Download a specific version of a package.

        .. important::
            As of version 2015.8.7, for distros which use yum/dnf, packages
            which have a version with a nonzero epoch (that is, versions
            which start with a number followed by a colon) must have the
            epoch included when specifying the version number. For example:

            .. code-block:: yaml

                vim-enhanced:
                  pkg.downloaded:
                    - version: 2:7.4.160-1.el7

            An **ignore_epoch** argument has been added to which causes the
            epoch to be disregarded when the state checks to see if the
            desired version was installed.

            You can install a specific version when using the ``pkgs``
            argument by including the version after the package:

            .. code-block:: yaml

                common_packages:
                  pkg.downloaded:
                    - pkgs:
                      - unzip
                      - dos2unix
                      - salt-minion: 2015.8.5-1.el6

    :param bool resolve_capabilities:
        Turn on resolving capabilities. This allow one to name "provides" or
        alias names for packages.

        .. versionadded:: 2018.3.0

    CLI Example:

    .. code-block:: yaml

        zsh:
          pkg.downloaded:
            - version: 5.0.5-4.63
            - fromrepo: "myrepository"
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # This state can only verify anything if the provider can report what has
    # already been downloaded.
    if 'pkg.list_downloaded' not in __salt__:
        ret['result'] = False
        ret['comment'] = 'The pkg.downloaded state is not available on ' \
                         'this platform'
        return ret

    # An explicitly empty ``pkgs`` list means "nothing to do", not an error.
    if not pkgs and isinstance(pkgs, list):
        ret['result'] = True
        ret['comment'] = 'No packages to download provided'
        return ret

    # If just a name (and optionally a version) is passed, just pack them into
    # the pkgs argument.
    if name and not pkgs:
        if version:
            pkgs = [{name: version}]
            version = None
        else:
            pkgs = [name]

    # It doesn't make sense here to receive 'downloadonly' as kwargs
    # as we're explicitly passing 'downloadonly=True' to execution module.
    if 'downloadonly' in kwargs:
        del kwargs['downloadonly']

    # Resolve "provides"/alias names into concrete package names if requested.
    pkgs, _refresh = _resolve_capabilities(pkgs, **kwargs)

    # Only downloading not yet downloaded packages
    targets = _find_download_targets(name,
                                     version,
                                     pkgs,
                                     fromrepo=fromrepo,
                                     ignore_epoch=ignore_epoch,
                                     **kwargs)
    if isinstance(targets, dict) and 'result' in targets:
        # The helper produced a complete state return; pass it through as-is.
        return targets
    elif not isinstance(targets, dict):
        ret['result'] = False
        ret['comment'] = 'An error was encountered while checking targets: ' \
                         '{0}'.format(targets)
        return ret

    # In test mode, only report what would have been downloaded.
    if __opts__['test']:
        summary = ', '.join(targets)
        ret['comment'] = 'The following packages would be ' \
                         'downloaded: {0}'.format(summary)
        return ret

    try:
        pkg_ret = __salt__['pkg.install'](name=name,
                                          pkgs=pkgs,
                                          version=version,
                                          downloadonly=True,
                                          fromrepo=fromrepo,
                                          ignore_epoch=ignore_epoch,
                                          **kwargs)
        ret['result'] = True
        ret['changes'].update(pkg_ret)
    except CommandExecutionError as exc:
        ret = {'name': name, 'result': False}
        if exc.info:
            # Get information for state return from the exception.
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = 'An error was encountered while downloading ' \
                             'package(s): {0}'.format(exc)
        return ret

    # Confirm the downloads actually happened by re-querying the package
    # cache and comparing it against the targets.
    new_pkgs = __salt__['pkg.list_downloaded'](**kwargs)
    ok, failed = _verify_install(targets, new_pkgs, ignore_epoch=ignore_epoch)

    if failed:
        summary = ', '.join([_get_desired_pkg(x, targets)
                             for x in failed])
        ret['result'] = False
        ret['comment'] = 'The following packages failed to ' \
                         'download: {0}'.format(summary)

    # No changes and no comment means everything requested was already in the
    # download cache.
    if not ret['changes'] and not ret['comment']:
        ret['result'] = True
        ret['comment'] = 'Packages are already downloaded: ' \
                         '{0}'.format(', '.join(targets))

    return ret
def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that packages related to certain advisory ids are installed.

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    CLI Example:

    .. code-block:: yaml

        issue-foo-fixed:
          pkg.patch_installed:
            - advisory_ids:
              - SUSE-SLE-SERVER-12-SP2-2017-185
              - SUSE-SLE-SERVER-12-SP2-2017-150
              - SUSE-SLE-SERVER-12-SP2-2017-120
    '''
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}

    # Bail out early on platforms whose provider cannot enumerate patches.
    if 'pkg.list_patches' not in __salt__:
        ret['result'] = False
        ret['comment'] = ('The pkg.patch_installed state is not available on '
                          'this platform')
        return ret

    # An explicitly empty advisory list is treated as a successful no-op.
    if isinstance(advisory_ids, list) and not advisory_ids:
        ret['result'] = True
        ret['comment'] = 'No advisory ids provided'
        return ret

    # Work out which advisory patches still need to be applied.
    targets = _find_advisory_targets(name, advisory_ids, **kwargs)
    if isinstance(targets, dict) and 'result' in targets:
        # The helper produced a complete state return; hand it back unchanged.
        return targets
    elif not isinstance(targets, list):
        ret['result'] = False
        ret['comment'] = ('An error was encountered while checking targets: '
                          '{0}'.format(targets))
        return ret

    # In test mode only report what would be done.
    if __opts__['test']:
        ret['comment'] = ('The following advisory patches would be '
                          'downloaded: {0}'.format(', '.join(targets)))
        return ret

    try:
        install_result = __salt__['pkg.install'](name=name,
                                                 advisory_ids=advisory_ids,
                                                 downloadonly=downloadonly,
                                                 **kwargs)
    except CommandExecutionError as exc:
        ret = {'name': name, 'result': False}
        if exc.info:
            # Get information for state return from the exception.
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = ('An error was encountered while downloading '
                              'package(s): {0}'.format(exc))
        return ret

    ret['result'] = True
    ret['changes'].update(install_result)

    # Nothing changed and nothing reported: the patches were already applied
    # (or downloaded, when running in download-only mode).
    if not ret['changes'] and not ret['comment']:
        status = 'downloaded' if downloadonly else 'installed'
        ret['comment'] = ('Advisory patch is not needed or related packages '
                          'are already {0}'.format(status))

    return ret
def patch_downloaded(name, advisory_ids=None, **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that packages related to certain advisory ids are downloaded.

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    CLI Example:

    .. code-block:: yaml

        preparing-to-fix-issues:
          pkg.patch_downloaded:
            - advisory_ids:
              - SUSE-SLE-SERVER-12-SP2-2017-185
              - SUSE-SLE-SERVER-12-SP2-2017-150
              - SUSE-SLE-SERVER-12-SP2-2017-120
    '''
    # Bail out early on platforms whose provider cannot enumerate patches.
    if 'pkg.list_patches' not in __salt__:
        return {'name': name,
                'result': False,
                'changes': {},
                'comment': 'The pkg.patch_downloaded state is not available on '
                           'this platform'}

    # Discard any caller-supplied 'downloadonly' flag: this state always
    # delegates to patch_installed() with downloadonly forced to True.
    kwargs.pop('downloadonly', None)
    return patch_installed(name=name,
                           advisory_ids=advisory_ids,
                           downloadonly=True,
                           **kwargs)
def latest(
        name,
        refresh=None,
        fromrepo=None,
        skip_verify=False,
        pkgs=None,
        watch_flags=True,
        **kwargs):
    '''
    Ensure that the named package is installed and the latest available
    package. If the package can be updated, this state function will update
    the package. Generally it is better for the
    :mod:`installed <salt.states.pkg.installed>` function to be used, as
    :mod:`latest <salt.states.pkg.latest>` will update the package whenever a
    new package is available.

    name
        The name of the package to maintain at the latest available version.
        This parameter is ignored if "pkgs" is used.

    fromrepo
        Specify a repository from which to install

    skip_verify
        Skip the GPG verification check for the package to be installed

    refresh
        This parameter controls whether or not the package repo database is
        updated prior to checking for the latest available version of the
        requested packages.

        If ``True``, the package database will be refreshed (``apt-get
        update`` or equivalent, depending on platform) before checking for
        the latest available version of the requested packages.

        If ``False``, the package database will *not* be refreshed before
        checking.

        If unset, then Salt treats package database refreshes differently
        depending on whether or not a ``pkg`` state has been executed already
        during the current Salt run. Once a refresh has been performed in a
        ``pkg`` state, for the remainder of that Salt run no other refreshes
        will be performed for ``pkg`` states which do not explicitly set
        ``refresh`` to ``True``. This prevents needless additional refreshes
        from slowing down the Salt run.

    :param str cache_valid_time:

        .. versionadded:: 2016.11.0

        This parameter sets the value in seconds after which the cache is
        marked as invalid, and a cache update is necessary. This overwrites
        the ``refresh`` parameter's default behavior.

        Example:

        .. code-block:: yaml

            httpd:
              pkg.latest:
                - refresh: True
                - cache_valid_time: 300

        In this case, a refresh will not take place for 5 minutes since the
        last ``apt-get update`` was executed on the system.

        .. note::

            This parameter is available only on Debian based distributions
            and has no effect on the rest.

    :param bool resolve_capabilities:
        Turn on resolving capabilities. This allow one to name "provides" or
        alias names for packages.

        .. versionadded:: 2018.3.0

    Multiple Package Installation Options:

    (Not yet supported for: FreeBSD, OpenBSD, MacOS, and Solaris pkgutil)

    pkgs
        A list of packages to maintain at the latest available version.

        .. code-block:: yaml

            mypkgs:
              pkg.latest:
                - pkgs:
                  - foo
                  - bar
                  - baz

    install_recommends
        Whether to install the packages marked as recommended. Default is
        ``True``. Currently only works with APT-based systems.

        .. versionadded:: 2015.5.0

        .. code-block:: yaml

            httpd:
              pkg.latest:
                - install_recommends: False

    only_upgrade
        Only upgrade the packages, if they are already installed. Default is
        ``False``. Currently only works with APT-based systems.

        .. versionadded:: 2015.5.0

        .. code-block:: yaml

            httpd:
              pkg.latest:
                - only_upgrade: True

        .. note::
            If this parameter is set to True and the package is not already
            installed, the state will fail.

    report_reboot_exit_codes
        If the installer exits with a recognized exit code indicating that a
        reboot is required, the module function
        *win_system.set_reboot_required_witnessed* will be called, preserving
        the knowledge of this event for the remainder of the current boot
        session. For the time being, ``3010`` is the only recognized exit
        code, but this is subject to future refinement. The value of this
        param defaults to ``True``. This parameter has no effect on
        non-Windows systems.

        .. versionadded:: 2016.11.0

        .. code-block:: yaml

            ms vcpp installed:
              pkg.latest:
                - name: ms-vcpp
                - report_reboot_exit_codes: False
    '''
    # Decide whether a repo refresh is needed for this run (honors the
    # once-per-run refresh tracking in salt.utils.pkg).
    refresh = salt.utils.pkg.check_refresh(__opts__, refresh)

    if kwargs.get('sources'):
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'The "sources" parameter is not supported.'}
    elif pkgs:
        desired_pkgs = list(_repack_pkgs(pkgs).keys())
        if not desired_pkgs:
            # Badly-formatted SLS
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'Invalidly formatted "pkgs" parameter. See '
                               'minion log.'}
    else:
        if not pkgs and isinstance(pkgs, list):
            # An explicitly empty ``pkgs`` list is a successful no-op.
            return {
                'name': name,
                'changes': {},
                'result': True,
                'comment': 'No packages to install provided'
            }
        else:
            desired_pkgs = [name]

    kwargs['saltenv'] = __env__

    # check if capabilities should be checked and modify the requested packages
    # accordingly.
    desired_pkgs, refresh = _resolve_capabilities(desired_pkgs,
                                                  refresh=refresh,
                                                  **kwargs)

    try:
        # Newest available version for each desired package.
        avail = __salt__['pkg.latest_version'](*desired_pkgs,
                                               fromrepo=fromrepo,
                                               refresh=refresh,
                                               **kwargs)
    except CommandExecutionError as exc:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'An error was encountered while checking the '
                           'newest available version of package(s): {0}'
                           .format(exc)}

    try:
        # Currently-installed version for each desired package.
        cur = __salt__['pkg.version'](*desired_pkgs, **kwargs)
    except CommandExecutionError as exc:
        return {'name': name, 'changes': {}, 'result': False,
                'comment': exc.strerror}

    # Repack the cur/avail data if only a single package is being checked
    if isinstance(cur, six.string_types):
        cur = {desired_pkgs[0]: cur}
    if isinstance(avail, six.string_types):
        avail = {desired_pkgs[0]: avail}

    targets = {}
    problems = []
    for pkg in desired_pkgs:
        if not avail.get(pkg):
            # Package either a) is up-to-date, or b) does not exist
            if not cur.get(pkg):
                # Package does not exist
                msg = 'No information found for \'{0}\'.'.format(pkg)
                log.error(msg)
                problems.append(msg)
            elif watch_flags \
                    and __grains__.get('os_family') == 'Gentoo' \
                    and __salt__['portage_config.is_changed_uses'](pkg):
                # Package is up-to-date, but Gentoo USE flags are changing so
                # we need to add it to the targets
                targets[pkg] = cur[pkg]
        else:
            # Package either a) is not installed, or b) is installed and has an
            # upgrade available
            targets[pkg] = avail[pkg]

    if problems:
        return {
            'name': name,
            'changes': {},
            'result': False,
            'comment': ' '.join(problems)
        }

    if targets:
        # Find up-to-date packages
        if not pkgs:
            # There couldn't have been any up-to-date packages if this state
            # only targeted a single package and is being allowed to proceed to
            # the install step.
            up_to_date = []
        else:
            up_to_date = [x for x in pkgs if x not in targets]

        if __opts__['test']:
            # Test mode: report what would change without installing.
            comments = []
            comments.append(
                'The following packages would be installed/upgraded: ' +
                ', '.join(sorted(targets))
            )
            if up_to_date:
                up_to_date_count = len(up_to_date)
                if up_to_date_count <= 10:
                    comments.append(
                        'The following packages are already up-to-date: ' +
                        ', '.join(
                            ['{0} ({1})'.format(x, cur[x])
                             for x in sorted(up_to_date)]
                        )
                    )
                else:
                    comments.append(
                        '{0} packages are already up-to-date'
                        .format(up_to_date_count)
                    )

            return {'name': name,
                    'changes': {},
                    'result': None,
                    'comment': '\n'.join(comments)}

        if salt.utils.platform.is_windows():
            # pkg.install execution module on windows ensures the software
            # package is installed when no version is specified, it does not
            # upgrade the software to the latest. This is per the design.
            # Build updated list of pkgs *with verion number*, exclude
            # non-targeted ones
            targeted_pkgs = [{x: targets[x]} for x in targets]
        else:
            # Build updated list of pkgs to exclude non-targeted ones
            targeted_pkgs = list(targets)

        # No need to refresh, if a refresh was necessary it would have been
        # performed above when pkg.latest_version was run.
        try:
            changes = __salt__['pkg.install'](name=None,
                                              refresh=False,
                                              fromrepo=fromrepo,
                                              skip_verify=skip_verify,
                                              pkgs=targeted_pkgs,
                                              **kwargs)
        except CommandExecutionError as exc:
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'An error was encountered while installing '
                               'package(s): {0}'.format(exc)}

        if changes:
            # Find failed and successful updates
            failed = [x for x in targets
                      if not changes.get(x) or
                      changes[x].get('new') != targets[x] and
                      targets[x] != 'latest']
            successful = [x for x in targets if x not in failed]

            comments = []
            if failed:
                msg = 'The following packages failed to update: ' \
                      '{0}'.format(', '.join(sorted(failed)))
                comments.append(msg)
            if successful:
                msg = 'The following packages were successfully ' \
                      'installed/upgraded: ' \
                      '{0}'.format(', '.join(sorted(successful)))
                comments.append(msg)
            if up_to_date:
                if len(up_to_date) <= 10:
                    msg = 'The following packages were already up-to-date: ' \
                          '{0}'.format(', '.join(sorted(up_to_date)))
                else:
                    msg = '{0} packages were already up-to-date '.format(
                        len(up_to_date))
                comments.append(msg)

            return {'name': name,
                    'changes': changes,
                    'result': False if failed else True,
                    'comment': ' '.join(comments)}
        else:
            # pkg.install reported no changes at all, so every target failed.
            if len(targets) > 10:
                comment = ('{0} targeted packages failed to update. '
                           'See debug log for details.'.format(len(targets)))
            elif len(targets) > 1:
                comment = ('The following targeted packages failed to update. '
                           'See debug log for details: ({0}).'
                           .format(', '.join(sorted(targets))))
            else:
                comment = 'Package {0} failed to ' \
                          'update.'.format(next(iter(list(targets.keys()))))
            if up_to_date:
                if len(up_to_date) <= 10:
                    comment += ' The following packages were already ' \
                               'up-to-date: ' \
                               '{0}'.format(', '.join(sorted(up_to_date)))
                else:
                    comment += '{0} packages were already ' \
                               'up-to-date'.format(len(up_to_date))

            return {'name': name,
                    'changes': changes,
                    'result': False,
                    'comment': comment}
    else:
        # Nothing to do: everything requested is already at the latest
        # available version.
        if len(desired_pkgs) > 10:
            comment = 'All {0} packages are up-to-date.'.format(
                len(desired_pkgs))
        elif len(desired_pkgs) > 1:
            comment = 'All packages are up-to-date ' \
                      '({0}).'.format(', '.join(sorted(desired_pkgs)))
        else:
            comment = 'Package {0} is already ' \
                      'up-to-date'.format(desired_pkgs[0])

        return {'name': name,
                'changes': {},
                'result': True,
                'comment': comment}
.format(', '.join(sorted(targets)))) else: comment = 'Package {0} failed to ' \ 'update.'.format(next(iter(list(targets.keys())))) if up_to_date: if len(up_to_date) <= 10: comment += ' The following packages were already ' \ 'up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: comment += '{0} packages were already ' \ 'up-to-date'.format(len(up_to_date)) return {'name': name, 'changes': changes, 'result': False, 'comment': comment} else: if len(desired_pkgs) > 10: comment = 'All {0} packages are up-to-date.'.format( len(desired_pkgs)) elif len(desired_pkgs) > 1: comment = 'All packages are up-to-date ' \ '({0}).'.format(', '.join(sorted(desired_pkgs))) else: comment = 'Package {0} is already ' \ 'up-to-date'.format(desired_pkgs[0]) return {'name': name, 'changes': {}, 'result': True, 'comment': comment} def _uninstall( action='remove', name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Common function for package removal ''' if action not in ('remove', 'purge'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalid action \'{0}\'. 
' 'This is probably a bug.'.format(action)} try: pkg_params = __salt__['pkg_resource.parse_targets']( name, pkgs, normalize=normalize)[0] except MinionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while parsing targets: ' '{0}'.format(exc)} targets = _find_remove_targets(name, version, pkgs, normalize, ignore_epoch=ignore_epoch, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, list): return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking targets: ' '{0}'.format(targets)} if action == 'purge': old_removed = __salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) targets.extend([x for x in pkg_params if x in old_removed]) targets.sort() if not targets: return {'name': name, 'changes': {}, 'result': True, 'comment': 'None of the targeted packages are installed' '{0}'.format(' or partially installed' if action == 'purge' else '')} if __opts__['test']: return {'name': name, 'changes': {}, 'result': None, 'comment': 'The following packages will be {0}d: ' '{1}.'.format(action, ', '.join(targets))} changes = __salt__['pkg.{0}'.format(action)](name, pkgs=pkgs, version=version, **kwargs) new = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) failed = [] for x in pkg_params: if __grains__['os_family'] in ['Suse', 'RedHat']: # Check if the package version set to be removed is actually removed: if x in new and not pkg_params[x]: failed.append(x) elif x in new and pkg_params[x] in new[x]: failed.append(x + "-" + pkg_params[x]) elif x in new: failed.append(x) if action == 'purge': new_removed = __salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) failed.extend([x for x in pkg_params if x in new_removed]) failed.sort() if failed: return {'name': name, 'changes': changes, 'result': False, 'comment': 'The following packages failed to {0}: ' '{1}.'.format(action, ', 
'.join(failed))} comments = [] not_installed = sorted([x for x in pkg_params if x not in targets]) if not_installed: comments.append('The following packages were not installed: ' '{0}'.format(', '.join(not_installed))) comments.append('The following packages were {0}d: ' '{1}.'.format(action, ', '.join(targets))) else: comments.append('All targeted packages were {0}d.'.format(action)) return {'name': name, 'changes': changes, 'result': True, 'comment': ' '.join(comments)} def removed(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.remove`` if necessary to remove the package. name The name of the package to be removed. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.removed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to remove. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='remove', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while removing ' 'package(s): {0}'.format(exc)) return ret def purged(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.purge`` if necessary to purge the package. 
All configuration files are also removed. name The name of the package to be purged. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. 
code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.purged: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to purge. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='purge', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while purging ' 'package(s): {0}'.format(exc)) return ret def uptodate(name, refresh=False, pkgs=None, **kwargs): ''' .. versionadded:: 2014.7.0 .. versionchanged:: 2018.3.0 Added support for the ``pkgin`` provider. Verify that the system is completely up to date. name The name has no functional value and is only used as a tracking reference refresh refresh the package database before checking for new upgrades pkgs list of packages to upgrade :param str cache_valid_time: This parameter sets the value in seconds after which cache marked as invalid, and cache update is necessary. This overwrite ``refresh`` parameter default behavior. In this case cache_valid_time is set, refresh will not take place for amount in seconds since last ``apt-get update`` executed on the system. .. 
note:: This parameter available only on Debian based distributions, and have no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 kwargs Any keyword arguments to pass through to ``pkg.upgrade``. .. versionadded:: 2015.5.0 ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'Failed to update'} if 'pkg.list_upgrades' not in __salt__: ret['comment'] = 'State pkg.uptodate is not available' return ret # emerge --update doesn't appear to support repo notation if 'fromrepo' in kwargs and __grains__['os_family'] == 'Gentoo': ret['comment'] = '\'fromrepo\' argument not supported on this platform' return ret if isinstance(refresh, bool): pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) try: packages = __salt__['pkg.list_upgrades'](refresh=refresh, **kwargs) expected = {pkgname: {'new': pkgver, 'old': __salt__['pkg.version'](pkgname, **kwargs)} for pkgname, pkgver in six.iteritems(packages)} if isinstance(pkgs, list): packages = [pkg for pkg in packages if pkg in pkgs] expected = {pkgname: pkgver for pkgname, pkgver in six.iteritems(expected) if pkgname in pkgs} except Exception as exc: ret['comment'] = six.text_type(exc) return ret else: ret['comment'] = 'refresh must be either True or False' return ret if not packages: ret['comment'] = 'System is already up-to-date' ret['result'] = True return ret elif __opts__['test']: ret['comment'] = 'System update will be performed' ret['changes'] = expected ret['result'] = None return ret try: ret['changes'] = __salt__['pkg.upgrade'](refresh=refresh, pkgs=pkgs, **kwargs) except CommandExecutionError as exc: if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while updating ' 'packages: {0}'.format(exc)) return ret # If a package list was provided, ensure those packages were updated missing = [] if isinstance(pkgs, list): missing = [pkg for pkg in six.iterkeys(expected) if pkg not in ret['changes']] if missing: ret['comment'] = 'The following package(s) failed to update: {0}'.format(', '.join(missing)) ret['result'] = False else: ret['comment'] = 'Upgrade ran successfully' ret['result'] = True return ret def group_installed(name, skip=None, include=None, **kwargs): ''' .. versionadded:: 2015.8.0 .. versionchanged:: 2016.11.0 Added support in :mod:`pacman <salt.modules.pacman>` Ensure that an entire package group is installed. This state is currently only supported for the :mod:`yum <salt.modules.yumpkg>` and :mod:`pacman <salt.modules.pacman>` package managers. skip Packages that would normally be installed by the package group ("default" packages), which should not be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - skip: - piranha include Packages which are included in a group, which would not normally be installed by a ``yum groupinstall`` ("optional" packages). Note that this will not enforce group membership; if you include packages which are not members of the specified groups, they will still be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - include: - haproxy .. versionchanged:: 2016.3.0 This option can no longer be passed as a comma-separated list, it must now be passed as a list (as shown in the above example). .. note:: Because this is essentially a wrapper around :py:func:`pkg.install <salt.modules.yumpkg.install>`, any argument which can be passed to pkg.install may also be included here, and it will be passed on to the call to :py:func:`pkg.install <salt.modules.yumpkg.install>`. 
''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} if 'pkg.group_diff' not in __salt__: ret['comment'] = 'pkg.group_install not available for this platform' return ret if skip is None: skip = [] else: if not isinstance(skip, list): ret['comment'] = 'skip must be formatted as a list' return ret for idx, item in enumerate(skip): if not isinstance(item, six.string_types): skip[idx] = six.text_type(item) if include is None: include = [] else: if not isinstance(include, list): ret['comment'] = 'include must be formatted as a list' return ret for idx, item in enumerate(include): if not isinstance(item, six.string_types): include[idx] = six.text_type(item) try: diff = __salt__['pkg.group_diff'](name) except CommandExecutionError as err: ret['comment'] = ('An error was encountered while installing/updating ' 'group \'{0}\': {1}.'.format(name, err)) return ret mandatory = diff['mandatory']['installed'] + \ diff['mandatory']['not installed'] invalid_skip = [x for x in mandatory if x in skip] if invalid_skip: ret['comment'] = ( 'The following mandatory packages cannot be skipped: {0}' .format(', '.join(invalid_skip)) ) return ret targets = diff['mandatory']['not installed'] targets.extend([x for x in diff['default']['not installed'] if x not in skip]) targets.extend(include) if not targets: ret['result'] = True ret['comment'] = 'Group \'{0}\' is already installed'.format(name) return ret partially_installed = diff['mandatory']['installed'] \ or diff['default']['installed'] \ or diff['optional']['installed'] if __opts__['test']: ret['result'] = None if partially_installed: ret['comment'] = ( 'Group \'{0}\' is partially installed and will be updated' .format(name) ) else: ret['comment'] = 'Group \'{0}\' will be installed'.format(name) return ret try: ret['changes'] = __salt__['pkg.install'](pkgs=targets, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while ' 'installing/updating group \'{0}\': {1}' .format(name, exc)) return ret failed = [x for x in targets if x not in __salt__['pkg.list_pkgs'](**kwargs)] if failed: ret['comment'] = ( 'Failed to install the following packages: {0}' .format(', '.join(failed)) ) return ret ret['result'] = True ret['comment'] = 'Group \'{0}\' was {1}'.format( name, 'updated' if partially_installed else 'installed' ) return ret def mod_init(low): ''' Set a flag to tell the install functions to refresh the package database. This ensures that the package database is refreshed only once during a state run significantly improving the speed of package management during a state run. It sets a flag for a number of reasons, primarily due to timeline logic. When originally setting up the mod_init for pkg a number of corner cases arose with different package managers and how they refresh package data. It also runs the "ex_mod_init" from the package manager module that is currently loaded. The "ex_mod_init" is expected to work as a normal "mod_init" function. .. 
seealso:: :py:func:`salt.modules.ebuild.ex_mod_init` ''' ret = True if 'pkg.ex_mod_init' in __salt__: ret = __salt__['pkg.ex_mod_init'](low) if low['fun'] == 'installed' or low['fun'] == 'latest': salt.utils.pkg.write_rtag(__opts__) return ret return False def mod_aggregate(low, chunks, running): ''' The mod_aggregate function which looks up all packages in the available low chunks and merges them into a single pkgs ref in the present low data ''' pkgs = [] pkg_type = None agg_enabled = [ 'installed', 'latest', 'removed', 'purged', ] if low.get('fun') not in agg_enabled: return low for chunk in chunks: tag = __utils__['state.gen_tag'](chunk) if tag in running: # Already ran the pkg state, skip aggregation continue if chunk.get('state') == 'pkg': if '__agg__' in chunk: continue # Check for the same function if chunk.get('fun') != low.get('fun'): continue # Check for the same repo if chunk.get('fromrepo') != low.get('fromrepo'): continue # Check first if 'sources' was passed so we don't aggregate pkgs # and sources together. if 'sources' in chunk: if pkg_type is None: pkg_type = 'sources' if pkg_type == 'sources': pkgs.extend(chunk['sources']) chunk['__agg__'] = True else: if pkg_type is None: pkg_type = 'pkgs' if pkg_type == 'pkgs': # Pull out the pkg names! if 'pkgs' in chunk: pkgs.extend(chunk['pkgs']) chunk['__agg__'] = True elif 'name' in chunk: version = chunk.pop('version', None) if version is not None: pkgs.append({chunk['name']: version}) else: pkgs.append(chunk['name']) chunk['__agg__'] = True if pkg_type is not None and pkgs: if pkg_type in low: low[pkg_type].extend(pkgs) else: low[pkg_type] = pkgs return low def mod_watch(name, **kwargs): ''' Install/reinstall a package based on a watch requisite .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered. 
''' sfun = kwargs.pop('sfun', None) mapfun = {'purged': purged, 'latest': latest, 'removed': removed, 'installed': installed} if sfun in mapfun: return mapfun[sfun](name, **kwargs) return {'name': name, 'changes': {}, 'comment': 'pkg.{0} does not work with the watch requisite'.format(sfun), 'result': False}
saltstack/salt
salt/states/pkg.py
_find_install_targets
python
def _find_install_targets(name=None, version=None, pkgs=None, sources=None, skip_suggestions=False, pkg_verify=False, normalize=True, ignore_epoch=False, reinstall=False, refresh=False, **kwargs): ''' Inspect the arguments to pkg.installed and discover what packages need to be installed. Return a dict of desired packages ''' was_refreshed = False if all((pkgs, sources)): return {'name': name, 'changes': {}, 'result': False, 'comment': 'Only one of "pkgs" and "sources" is permitted.'} # dict for packages that fail pkg.verify and their altered files altered_files = {} # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True if salt.utils.platform.is_windows(): # Windows requires a refresh to establish a pkg db if refresh=True, so # add it to the kwargs. 
kwargs['refresh'] = refresh resolve_capabilities = kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__ try: cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) cur_prov = resolve_capabilities and __salt__['pkg.list_provides'](**kwargs) or dict() except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': exc.strerror} if salt.utils.platform.is_windows() and kwargs.pop('refresh', False): # We already refreshed when we called pkg.list_pkgs was_refreshed = True refresh = False if any((pkgs, sources)): if pkgs: desired = _repack_pkgs(pkgs, normalize=normalize) elif sources: desired = __salt__['pkg_resource.pack_sources']( sources, normalize=normalize, ) if not desired: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted \'{0}\' parameter. See ' 'minion log.'.format('pkgs' if pkgs else 'sources')} to_unpurge = _find_unpurge_targets(desired, **kwargs) else: if salt.utils.platform.is_windows(): pkginfo = _get_package_info(name, saltenv=kwargs['saltenv']) if not pkginfo: return {'name': name, 'changes': {}, 'result': False, 'comment': 'Package {0} not found in the ' 'repository.'.format(name)} if version is None: version = _get_latest_pkg_version(pkginfo) if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) desired = {_normalize_name(name): version} else: desired = {name: version} to_unpurge = _find_unpurge_targets(desired, **kwargs) # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names origin = bool(re.search('/', name)) if __grains__['os'] == 'FreeBSD' and origin: cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == name] else: cver = cur_pkgs.get(name, []) if name not in to_unpurge: if version and version in cver \ and not reinstall \ and not pkg_verify: # The package is installed and is the correct version return {'name': name, 'changes': {}, 'result': True, 
'comment': 'Version {0} of package \'{1}\' is already ' 'installed'.format(version, name)} # if cver is not an empty string, the package is already installed elif cver and version is None \ and not reinstall \ and not pkg_verify: # The package is installed return {'name': name, 'changes': {}, 'result': True, 'comment': 'Package {0} is already ' 'installed'.format(name)} version_spec = False if not sources: # Check for alternate package names if strict processing is not # enforced. Takes extra time. Disable for improved performance if not skip_suggestions: # Perform platform-specific pre-flight checks not_installed = dict([ (name, version) for name, version in desired.items() if not (name in cur_pkgs and (version is None or _fulfills_version_string(cur_pkgs[name], version))) ]) if not_installed: try: problems = _preflight_check(not_installed, **kwargs) except CommandExecutionError: pass else: comments = [] if problems.get('no_suggest'): comments.append( 'The following package(s) were not found, and no ' 'possible matches were found in the package db: ' '{0}'.format( ', '.join(sorted(problems['no_suggest'])) ) ) if problems.get('suggest'): for pkgname, suggestions in \ six.iteritems(problems['suggest']): comments.append( 'Package \'{0}\' not found (possible matches: ' '{1})'.format(pkgname, ', '.join(suggestions)) ) if comments: if len(comments) > 1: comments.append('') return {'name': name, 'changes': {}, 'result': False, 'comment': '. '.join(comments).rstrip()} # Resolve the latest package version for any packages with "latest" in the # package version wants_latest = [] \ if sources \ else [x for x, y in six.iteritems(desired) if y == 'latest'] if wants_latest: resolved_latest = __salt__['pkg.latest_version'](*wants_latest, refresh=refresh, **kwargs) if len(wants_latest) == 1: resolved_latest = {wants_latest[0]: resolved_latest} if refresh: was_refreshed = True refresh = False # pkg.latest_version returns an empty string when the package is # up-to-date. 
So check the currently-installed packages. If found, the # resolved latest version will be the currently installed one from # cur_pkgs. If not found, then the package doesn't exist and the # resolved latest version will be None. for key in resolved_latest: if not resolved_latest[key]: if key in cur_pkgs: resolved_latest[key] = cur_pkgs[key][-1] else: resolved_latest[key] = None # Update the desired versions with the ones we resolved desired.update(resolved_latest) # Find out which packages will be targeted in the call to pkg.install targets = {} to_reinstall = {} problems = [] warnings = [] failed_verify = False for package_name, version_string in six.iteritems(desired): cver = cur_pkgs.get(package_name, []) if resolve_capabilities and not cver and package_name in cur_prov: cver = cur_pkgs.get(cur_prov.get(package_name)[0], []) # Package not yet installed, so add to targets if not cver: targets[package_name] = version_string continue if sources: if reinstall: to_reinstall[package_name] = version_string continue elif 'lowpkg.bin_pkg_info' not in __salt__: continue # Metadata parser is available, cache the file and derive the # package's name and version err = 'Unable to cache {0}: {1}' try: cached_path = __salt__['cp.cache_file'](version_string, saltenv=kwargs['saltenv']) except CommandExecutionError as exc: problems.append(err.format(version_string, exc)) continue if not cached_path: problems.append(err.format(version_string, 'file not found')) continue elif not os.path.exists(cached_path): problems.append('{0} does not exist on minion'.format(version_string)) continue source_info = __salt__['lowpkg.bin_pkg_info'](cached_path) if source_info is None: warnings.append('Failed to parse metadata for {0}'.format(version_string)) continue else: verstr = source_info['version'] else: verstr = version_string if reinstall: to_reinstall[package_name] = version_string continue if not __salt__['pkg_resource.check_extra_requirements'](package_name, version_string): 
targets[package_name] = version_string continue # No version specified and pkg is installed elif __salt__['pkg_resource.version_clean'](version_string) is None: if (not reinstall) and pkg_verify: try: verify_result = __salt__['pkg.verify']( package_name, ignore_types=ignore_types, verify_options=verify_options, **kwargs ) except (CommandExecutionError, SaltInvocationError) as exc: failed_verify = exc.strerror continue if verify_result: to_reinstall[package_name] = version_string altered_files[package_name] = verify_result continue version_fulfilled = False allow_updates = bool(not sources and kwargs.get('allow_updates')) try: version_fulfilled = _fulfills_version_string(cver, verstr, ignore_epoch=ignore_epoch, allow_updates=allow_updates) except CommandExecutionError as exc: problems.append(exc.strerror) continue # Compare desired version against installed version. version_spec = True if not version_fulfilled: if reinstall: to_reinstall[package_name] = version_string else: version_conditions = _parse_version_string(version_string) if pkg_verify and any(oper == '==' for oper, version in version_conditions): try: verify_result = __salt__['pkg.verify']( package_name, ignore_types=ignore_types, verify_options=verify_options, **kwargs ) except (CommandExecutionError, SaltInvocationError) as exc: failed_verify = exc.strerror continue if verify_result: to_reinstall[package_name] = version_string altered_files[package_name] = verify_result else: log.debug( 'Current version (%s) did not match desired version ' 'specification (%s), adding to installation targets', cver, version_string ) targets[package_name] = version_string if failed_verify: problems.append(failed_verify) if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not any((targets, to_unpurge, to_reinstall)): # All specified packages are installed msg = 'All specified packages are already installed{0}' msg = msg.format( ' and are at the desired version' if 
version_spec and not sources else '' ) ret = {'name': name, 'changes': {}, 'result': True, 'comment': msg} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret return (desired, targets, to_unpurge, to_reinstall, altered_files, warnings, was_refreshed)
Inspect the arguments to pkg.installed and discover what packages need to be installed. Return a dict of desired packages
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pkg.py#L496-L837
null
# -*- coding: utf-8 -*- ''' Installation of packages using OS package managers such as yum or apt-get ========================================================================= .. note:: On minions running systemd>=205, as of version 2015.8.12, 2016.3.3, and 2016.11.0, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing the package manager commands spawned by Salt, when Salt updates itself (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.use_scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Salt can manage software packages via the pkg state module, packages can be set up to be installed, latest, removed and purged. Package management declarations are typically rather simple: .. code-block:: yaml vim: pkg.installed A more involved example involves pulling from a custom repository. .. code-block:: yaml base: pkgrepo.managed: - humanname: Logstash PPA - name: ppa:wolfnet/logstash - dist: precise - file: /etc/apt/sources.list.d/logstash.list - keyid: 28B04E4A - keyserver: keyserver.ubuntu.com logstash: pkg.installed: - fromrepo: ppa:wolfnet/logstash Multiple packages can also be installed with the use of the pkgs state module .. code-block:: yaml dotdeb.repo: pkgrepo.managed: - humanname: Dotdeb - name: deb http://packages.dotdeb.org wheezy-php55 all - dist: wheezy-php55 - file: /etc/apt/sources.list.d/dotbeb.list - keyid: 89DF5277 - keyserver: keys.gnupg.net - refresh_db: true php.packages: pkg.installed: - fromrepo: wheezy-php55 - pkgs: - php5-fpm - php5-cli - php5-curl .. 
warning:: Package names are currently case-sensitive. If the minion is using a package manager which is not case-sensitive (such as :mod:`pkgng <salt.modules.pkgng>`), then this state will fail if the proper case is not used. This will be addressed in a future release of Salt. ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import fnmatch import logging import os import re # Import Salt libs import salt.utils.pkg import salt.utils.platform import salt.utils.versions from salt.output import nested from salt.utils.functools import namespaced_function as _namespaced_function from salt.utils.odict import OrderedDict as _OrderedDict from salt.exceptions import ( CommandExecutionError, MinionError, SaltInvocationError ) from salt.modules.pkg_resource import _repack_pkgs # Import 3rd-party libs from salt.ext import six # pylint: disable=invalid-name _repack_pkgs = _namespaced_function(_repack_pkgs, globals()) if salt.utils.platform.is_windows(): # pylint: disable=import-error,no-name-in-module,unused-import from salt.ext.six.moves.urllib.parse import urlparse as _urlparse from salt.exceptions import SaltRenderError import collections import datetime import errno import time from functools import cmp_to_key # pylint: disable=import-error # pylint: enable=unused-import from salt.modules.win_pkg import _get_package_info from salt.modules.win_pkg import get_repo_data from salt.modules.win_pkg import _get_repo_details from salt.modules.win_pkg import _refresh_db_conditional from salt.modules.win_pkg import refresh_db from salt.modules.win_pkg import genrepo from salt.modules.win_pkg import _repo_process_pkg_sls from salt.modules.win_pkg import _get_latest_pkg_version from salt.modules.win_pkg import _reverse_cmp_pkg_versions _get_package_info = _namespaced_function(_get_package_info, globals()) get_repo_data = _namespaced_function(get_repo_data, globals()) _get_repo_details = \ _namespaced_function(_get_repo_details, globals()) 
_refresh_db_conditional = \ _namespaced_function(_refresh_db_conditional, globals()) refresh_db = _namespaced_function(refresh_db, globals()) genrepo = _namespaced_function(genrepo, globals()) _repo_process_pkg_sls = \ _namespaced_function(_repo_process_pkg_sls, globals()) _get_latest_pkg_version = \ _namespaced_function(_get_latest_pkg_version, globals()) _reverse_cmp_pkg_versions = \ _namespaced_function(_reverse_cmp_pkg_versions, globals()) # The following imports are used by the namespaced win_pkg funcs # and need to be included in their globals. # pylint: disable=import-error,unused-import from salt.utils.versions import LooseVersion # pylint: enable=import-error,unused-import # pylint: enable=invalid-name log = logging.getLogger(__name__) def __virtual__(): ''' Only make these states available if a pkg provider has been detected or assigned for this minion ''' return 'pkg.install' in __salt__ def _get_comparison_spec(pkgver): ''' Return a tuple containing the comparison operator and the version. If no comparison operator was passed, the comparison is assumed to be an "equals" comparison, and "==" will be the operator returned. ''' oper, verstr = salt.utils.pkg.split_comparison(pkgver.strip()) if oper in ('=', ''): oper = '==' return oper, verstr def _parse_version_string(version_conditions_string): ''' Returns a list of two-tuples containing (operator, version). ''' result = [] version_conditions_string = version_conditions_string.strip() if not version_conditions_string: return result for version_condition in version_conditions_string.split(','): operator_and_version = _get_comparison_spec(version_condition) result.append(operator_and_version) return result def _fulfills_version_string(installed_versions, version_conditions_string, ignore_epoch=False, allow_updates=False): ''' Returns True if any of the installed versions match the specified version conditions, otherwise returns False. 
installed_versions The installed versions version_conditions_string The string containing all version conditions. E.G. 1.2.3-4 >=1.2.3-4 >=1.2.3-4, <2.3.4-5 >=1.2.3-4, <2.3.4-5, !=1.2.4-1 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. allow_updates : False Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. (Only applicable if only one strict version condition is specified E.G. version: 2.0.6~ubuntu3) ''' version_conditions = _parse_version_string(version_conditions_string) for installed_version in installed_versions: fullfills_all = True for operator, version_string in version_conditions: if allow_updates and len(version_conditions) == 1 and operator == '==': operator = '>=' fullfills_all = fullfills_all and _fulfills_version_spec([installed_version], operator, version_string, ignore_epoch=ignore_epoch) if fullfills_all: return True return False def _fulfills_version_spec(versions, oper, desired_version, ignore_epoch=False): ''' Returns True if any of the installed versions match the specified version, otherwise returns False ''' cmp_func = __salt__.get('pkg.version_cmp') # stripping "with_origin" dict wrapper if salt.utils.platform.is_freebsd(): if isinstance(versions, dict) and 'version' in versions: versions = versions['version'] for ver in versions: if (oper == '==' and fnmatch.fnmatch(ver, desired_version)) \ or salt.utils.versions.compare(ver1=ver, oper=oper, ver2=desired_version, cmp_func=cmp_func, ignore_epoch=ignore_epoch): return True return False def _find_unpurge_targets(desired, **kwargs): ''' Find packages which are marked to be purged but can't yet be removed because they are dependencies for 
other installed packages. These are the packages which will need to be 'unpurged' because they are part of pkg.installed states. This really just applies to Debian-based Linuxes. ''' return [ x for x in desired if x in __salt__['pkg.list_pkgs'](purge_desired=True, **kwargs) ] def _find_download_targets(name=None, version=None, pkgs=None, normalize=True, skip_suggestions=False, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.downloaded and discover what packages need to be downloaded. Return a dict of packages to download. ''' cur_pkgs = __salt__['pkg.list_downloaded'](**kwargs) if pkgs: to_download = _repack_pkgs(pkgs, normalize=normalize) if not to_download: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. See ' 'minion log.'} else: if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_download = {_normalize_name(name): version} else: to_download = {name: version} cver = cur_pkgs.get(name, {}) if name in to_download: # Package already downloaded, no need to download again if cver and version in cver: return {'name': name, 'changes': {}, 'result': True, 'comment': 'Version {0} of package \'{1}\' is already ' 'downloaded'.format(version, name)} # if cver is not an empty string, the package is already downloaded elif cver and version is None: # The package is downloaded return {'name': name, 'changes': {}, 'result': True, 'comment': 'Package {0} is already ' 'downloaded'.format(name)} version_spec = False if not skip_suggestions: try: problems = _preflight_check(to_download, **kwargs) except CommandExecutionError: pass else: comments = [] if problems.get('no_suggest'): comments.append( 'The following package(s) were not found, and no ' 'possible matches were found in the package db: ' '{0}'.format( ', '.join(sorted(problems['no_suggest'])) ) ) if problems.get('suggest'): for pkgname, suggestions in \ six.iteritems(problems['suggest']): 
comments.append( 'Package \'{0}\' not found (possible matches: ' '{1})'.format(pkgname, ', '.join(suggestions)) ) if comments: if len(comments) > 1: comments.append('') return {'name': name, 'changes': {}, 'result': False, 'comment': '. '.join(comments).rstrip()} # Find out which packages will be targeted in the call to pkg.download # Check current downloaded versions against specified versions targets = {} problems = [] for pkgname, pkgver in six.iteritems(to_download): cver = cur_pkgs.get(pkgname, {}) # Package not yet downloaded, so add to targets if not cver: targets[pkgname] = pkgver continue # No version specified but package is already downloaded elif cver and not pkgver: continue version_spec = True try: if not _fulfills_version_string(cver.keys(), pkgver, ignore_epoch=ignore_epoch): targets[pkgname] = pkgver except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already downloaded msg = ( 'All specified packages{0} are already downloaded' .format(' (matching specified versions)' if version_spec else '') ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_advisory_targets(name=None, advisory_ids=None, **kwargs): ''' Inspect the arguments to pkg.patch_installed and discover what advisory patches need to be installed. Return a dict of advisory patches to install. 
''' cur_patches = __salt__['pkg.list_installed_patches'](**kwargs) if advisory_ids: to_download = advisory_ids else: to_download = [name] if cur_patches.get(name, {}): # Advisory patch already installed, no need to install it again return {'name': name, 'changes': {}, 'result': True, 'comment': 'Advisory patch {0} is already ' 'installed'.format(name)} # Find out which advisory patches will be targeted in the call to pkg.install targets = [] for patch_name in to_download: cver = cur_patches.get(patch_name, {}) # Advisory patch not yet installed, so add to targets if not cver: targets.append(patch_name) continue if not targets: # All specified packages are already downloaded msg = ('All specified advisory patches are already installed') return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_remove_targets(name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.removed and discover what packages need to be removed. Return a dict of packages to remove. ''' if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if pkgs: to_remove = _repack_pkgs(pkgs, normalize=normalize) if not to_remove: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. 
See ' 'minion log.'} else: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_remove = {_normalize_name(name): version} version_spec = False # Find out which packages will be targeted in the call to pkg.remove # Check current versions against specified versions targets = [] problems = [] for pkgname, pkgver in six.iteritems(to_remove): # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names origin = bool(re.search('/', pkgname)) if __grains__['os'] == 'FreeBSD' and origin: cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == pkgname] else: cver = cur_pkgs.get(pkgname, []) # Package not installed, no need to remove if not cver: continue # No version specified and pkg is installed elif __salt__['pkg_resource.version_clean'](pkgver) is None: targets.append(pkgname) continue version_spec = True try: if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch): targets.append(pkgname) else: log.debug( 'Current version (%s) did not match desired version ' 'specification (%s), will not remove', cver, pkgver ) except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already absent msg = 'All specified packages{0} are already absent'.format( ' (matching specified versions)' if version_spec else '' ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None): ''' Determine whether or not the installed packages match what was requested in the SLS file. ''' ok = [] failed = [] if not new_caps: new_caps = dict() for pkgname, pkgver in desired.items(): # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names. # Homebrew for Mac OSX does something similar with tap names # prefixing package names, separated with a slash. 
has_origin = '/' in pkgname if __grains__['os'] == 'FreeBSD' and has_origin: cver = [k for k, v in six.iteritems(new_pkgs) if v['origin'] == pkgname] elif __grains__['os'] == 'MacOS' and has_origin: cver = new_pkgs.get(pkgname, new_pkgs.get(pkgname.split('/')[-1])) elif __grains__['os'] == 'OpenBSD': cver = new_pkgs.get(pkgname.split('%')[0]) elif __grains__['os_family'] == 'Debian': cver = new_pkgs.get(pkgname.split('=')[0]) else: cver = new_pkgs.get(pkgname) if not cver and pkgname in new_caps: cver = new_pkgs.get(new_caps.get(pkgname)[0]) if not cver: failed.append(pkgname) continue elif pkgver == 'latest': ok.append(pkgname) continue elif not __salt__['pkg_resource.version_clean'](pkgver): ok.append(pkgname) continue elif pkgver.endswith("*") and cver[0].startswith(pkgver[:-1]): ok.append(pkgname) continue if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch): ok.append(pkgname) else: failed.append(pkgname) return ok, failed def _get_desired_pkg(name, desired): ''' Helper function that retrieves and nicely formats the desired pkg (and version if specified) so that helpful information can be printed in the comment for the state. 
''' if not desired[name] or desired[name].startswith(('<', '>', '=')): oper = '' else: oper = '=' return '{0}{1}{2}'.format(name, oper, '' if not desired[name] else desired[name]) def _preflight_check(desired, fromrepo, **kwargs): ''' Perform platform-specific checks on desired packages ''' if 'pkg.check_db' not in __salt__: return {} ret = {'suggest': {}, 'no_suggest': []} pkginfo = __salt__['pkg.check_db']( *list(desired.keys()), fromrepo=fromrepo, **kwargs ) for pkgname in pkginfo: if pkginfo[pkgname]['found'] is False: if pkginfo[pkgname]['suggestions']: ret['suggest'][pkgname] = pkginfo[pkgname]['suggestions'] else: ret['no_suggest'].append(pkgname) return ret def _nested_output(obj): ''' Serialize obj and format for output ''' nested.__opts__ = __opts__ ret = nested.output(obj).rstrip() return ret def _resolve_capabilities(pkgs, refresh=False, **kwargs): ''' Resolve capabilities in ``pkgs`` and exchange them with real package names, when the result is distinct. This feature can be turned on while setting the paramter ``resolve_capabilities`` to True. Return the input dictionary with replaced capability names and as second return value a bool which say if a refresh need to be run. In case of ``resolve_capabilities`` is False (disabled) or not supported by the implementation the input is returned unchanged. ''' if not pkgs or 'pkg.resolve_capabilities' not in __salt__: return pkgs, refresh ret = __salt__['pkg.resolve_capabilities'](pkgs, refresh=refresh, **kwargs) return ret, False def installed( name, version=None, refresh=None, fromrepo=None, skip_verify=False, skip_suggestions=False, pkgs=None, sources=None, allow_updates=False, pkg_verify=False, normalize=True, ignore_epoch=False, reinstall=False, update_holds=False, bypass_file=None, bypass_file_contains=None, **kwargs): ''' Ensure that the package is installed, and that it is the correct version (if specified). :param str name: The name of the package to be installed. 
This parameter is ignored if either "pkgs" or "sources" is used. Additionally, please note that this option can only be used to install packages from a software repository. To install a package file manually, use the "sources" option detailed below. :param str version: Install a specific version of a package. This option is ignored if "sources" is used. Currently, this option is supported for the following pkg providers: :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`pkgin <salt.modules.pkgin>`, :mod:`win_pkg <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`. The version number includes the release designation where applicable, to allow Salt to target a specific release of a given version. When in doubt, using the ``pkg.latest_version`` function for an uninstalled package will tell you the version available. .. code-block:: bash # salt myminion pkg.latest_version vim-enhanced myminion: 2:7.4.160-1.el7 .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the ``pkg.latest_version`` output above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. 
Also, while this function is not yet implemented for all pkg frontends, :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` will show all versions available in the various repositories for a given package, irrespective of whether or not it is installed. .. code-block:: bash # salt myminion pkg.list_repo_pkgs bash myminion: ---------- bash: - 4.2.46-21.el7_3 - 4.2.46-20.el7_2 This function was first added for :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` in 2014.1.0, and was expanded to :py:func:`Debian/Ubuntu <salt.modules.aptpkg.list_repo_pkgs>` and :py:func:`Arch Linux <salt.modules.pacman.list_repo_pkgs>`-based distros in the 2017.7.0 release. The version strings returned by either of these functions can be used as version specifiers in pkg states. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.installed: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 If the version given is the string ``latest``, the latest available package version will be installed à la ``pkg.latest``. **WILDCARD VERSIONS** As of the 2017.7.0 release, this state now supports wildcards in package versions for SUSE SLES/Leap/Tumbleweed, Debian/Ubuntu, RHEL/CentOS, Arch Linux, and their derivatives. Using wildcards can be useful for packages where the release name is built into the version in some way, such as for RHEL/CentOS which typically has version numbers like ``1.2.34-5.el7``. An example of the usage for this would be: .. code-block:: yaml mypkg: pkg.installed: - version: '1.2.34*' Keep in mind that using wildcard versions will result in a slower state run since Salt must gather the available versions of the specified packages and figure out which of them match the specified wildcard expression. :param bool refresh: This parameter controls whether or not the package repo database is updated prior to installing the requested package(s). 
If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before installing. If ``False``, the package database will *not* be refreshed before installing. If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - cache_valid_time: 300 - allow_updates: True - hold: False In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param str fromrepo: Specify a repository from which to install .. note:: Distros which use APT (Debian, Ubuntu, etc.) do not have a concept of repositories, in the same way as YUM-based distros do. When a source is added, it is assigned to a given release. Consider the following source configuration: .. code-block:: text deb http://ppa.launchpad.net/saltstack/salt/ubuntu precise main The packages provided by this source would be made available via the ``precise`` release, therefore ``fromrepo`` would need to be set to ``precise`` for Salt to install the package from this source. 
Having multiple sources in the same release may result in the default install candidate being newer than what is desired. If this is the case, the desired version must be specified using the ``version`` parameter. If the ``pkgs`` parameter is being used to install multiple packages in the same state, then instead of using ``version``, use the method of version specification described in the **Multiple Package Installation Options** section below. Running the shell command ``apt-cache policy pkgname`` on a minion can help elucidate the APT configuration and aid in properly configuring states: .. code-block:: bash root@saltmaster:~# salt ubuntu01 cmd.run 'apt-cache policy ffmpeg' ubuntu01: ffmpeg: Installed: (none) Candidate: 7:0.10.11-1~precise1 Version table: 7:0.10.11-1~precise1 0 500 http://ppa.launchpad.net/jon-severinsson/ffmpeg/ubuntu/ precise/main amd64 Packages 4:0.8.10-0ubuntu0.12.04.1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise-updates/main amd64 Packages 500 http://security.ubuntu.com/ubuntu/ precise-security/main amd64 Packages 4:0.8.1-0ubuntu1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise/main amd64 Packages The release is located directly after the source's URL. The actual release name is the part before the slash, so to install version **4:0.8.10-0ubuntu0.12.04.1** either ``precise-updates`` or ``precise-security`` could be used for the ``fromrepo`` value. :param bool skip_verify: Skip the GPG verification check for the package to be installed :param bool skip_suggestions: Force strict package naming. Disables lookup of package alternatives. .. versionadded:: 2014.1.1 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 :param bool allow_updates: Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). 
This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - allow_updates: True - hold: False :param bool pkg_verify: .. versionadded:: 2014.7.0 For requested packages that are already installed and would not be targeted for upgrade or downgrade, use pkg.verify to determine if any of the files installed by the package have been altered. If files have been altered, the reinstall option of pkg.install is used to force a reinstall. Types to ignore can be passed to pkg.verify. Additionally, ``verify_options`` can be used to modify further the behavior of pkg.verify. See examples below. Currently, this option is supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>`. Examples: .. code-block:: yaml httpd: pkg.installed: - version: 2.2.15-30.el6.centos - pkg_verify: True .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc - verify_options: - nodeps - nofiledigest :param list ignore_types: List of types to ignore when verifying the package .. versionadded:: 2014.7.0 :param list verify_options: List of additional options to pass when verifying the package. These options will be added to the ``rpm -V`` command, prepended with ``--`` (for example, when ``nodeps`` is passed in this option, ``rpm -V`` will be run with ``--nodeps``). .. versionadded:: 2016.11.0 :param bool normalize: Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml gpfs.gplbin-2.6.32-279.31.1.el6.x86_64: pkg.installed: - normalize: False :param bool ignore_epoch: When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.installed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the package would be installed, but the state would report as failed because the actual installed version would be ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 | **MULTIPLE PACKAGE INSTALLATION OPTIONS: (not supported in pkgng)** :param list pkgs: A list of packages to install from a software repository. All packages listed under ``pkgs`` will be installed via a single command. .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar - baz - hold: True ``NOTE:`` For :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`winrepo <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`, version numbers can be specified in the ``pkgs`` argument. For example: .. 
code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz Additionally, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`zypper <salt.modules.zypper>`, :mod:`yum/dnf <salt.modules.yumpkg>`, and :mod:`apt <salt.modules.aptpkg>` support the ``<``, ``<=``, ``>=``, and ``>`` operators for more control over what versions will be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: '>=1.2.3-4' - baz ``NOTE:`` When using comparison operators, the expression must be enclosed in quotes to avoid a YAML render error. With :mod:`ebuild <salt.modules.ebuild>` is also possible to specify a use flag list and/or if the given packages should be in package.accept_keywords file and/or the overlay from which you want the package to be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo: '~' - bar: '~>=1.2:slot::overlay[use,-otheruse]' - baz :param list sources: A list of packages to install, along with the source URI or local path from which to install each package. In the example below, ``foo``, ``bar``, ``baz``, etc. refer to the name of the package, as it would appear in the output of the ``pkg.version`` or ``pkg.list_pkgs`` salt CLI commands. .. code-block:: yaml mypkgs: pkg.installed: - sources: - foo: salt://rpms/foo.rpm - bar: http://somesite.org/bar.rpm - baz: ftp://someothersite.org/baz.rpm - qux: /minion/path/to/qux.rpm **PLATFORM-SPECIFIC ARGUMENTS** These are specific to each OS. If it does not apply to the execution module for your OS, it is ignored. :param bool hold: Force the package to be held at the current installed version. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2014.7.0 :param bool update_holds: If ``True``, and this function would update the package version, any packages which are being held will be temporarily unheld so that they can be updated. 
Otherwise, if this function attempts to update a held package, the held package(s) will be skipped and the state will fail. By default, this parameter is set to ``False``. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2016.11.0 :param list names: A list of packages to install from a software repository. Each package will be installed individually by the package manager. .. warning:: Unlike ``pkgs``, the ``names`` parameter cannot specify a version. In addition, it makes a separate call to the package management frontend to install each package, whereas ``pkgs`` makes just a single call. It is therefore recommended to use ``pkgs`` instead of ``names`` to install multiple packages, both for the additional features and the performance improvement that it brings. :param bool install_recommends: Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - install_recommends: False :param bool only_upgrade: Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. :param bool report_reboot_exit_codes: If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. 
code-block:: yaml ms vcpp installed: pkg.installed: - name: ms-vcpp - version: 10.0.40219 - report_reboot_exit_codes: False :param str bypass_file: If you wish to bypass the full package validation process, you can specify a file related to the installed pacakge as a way to validate the pacakge has already been installed. A good example would be a config file that is deployed with the package. Another bypass_file could be ``/run/salt-minon.pid``. .. code-block:: yaml install_ntp: pkg.installed: - name: ntp - bypass_file: /etc/ntp.conf The use case for this feature is when running salt at significant scale. Each state that has a requisite for a ``pkg.installed`` will have salt querying the package manager of the system. Compared to simple diff checks, querying the pacakge manager is a lengthy process. This feature is an attempt to reduce the run time of states. If only a config change is being made but you wish to keep all of the self resolving requisites this bypasses the lenghty cost of the package manager. The assumption is that if this file is present, the package should already be installed. :param str bypass_file_contains: This option can only be used in conjunction with the ``bypass_file`` option. It is to provide a second layer of validation before bypassing the ``pkg.installed`` process. .. code-block:: yaml install_ntp: pkg.installed: - name: ntp - bypass_file: /etc/ntp.conf - bypass_file_contains: version-20181218 The will have salt check to see if the file contains the specified string. If the value is found, the ``pkg.installed`` process will be bypassed under the assumption that two pieces of validation have passed and the package is already installed. .. warning:: Do not try and use ``{{ salt['pkg.version']('ntp') }}`` in a jinja template as part of your bypass_file_contains match. This will trigger a ``pkg.version`` lookup with the pacakge manager and negate any time saved by trying to use the bypass feature. 
:return: A dictionary containing the state of the software installation :rtype dict: .. note:: The ``pkg.installed`` state supports the usage of ``reload_modules``. This functionality allows you to force Salt to reload all modules. In many cases, Salt is clever enough to transparently reload the modules. For example, if you install a package, Salt reloads modules because some other module or state might require the package which was installed. However, there are some edge cases where this may not be the case, which is what ``reload_modules`` is meant to resolve. You should only use ``reload_modules`` if your ``pkg.installed`` does some sort of installation where if you do not reload the modules future items in your state which rely on the software being installed will fail. Please see the :ref:`Reloading Modules <reloading-modules>` documentation for more information. ''' if not pkgs and isinstance(pkgs, list): return {'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided'} # If just a name (and optionally a version) is passed, just pack them into # the pkgs argument. 
if name and not any((pkgs, sources)): if version: pkgs = [{name: version}] version = None else: pkgs = [name] kwargs['saltenv'] = __env__ refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if bypass_file is not None and bypass_file_contains is not None: if os.path.isfile(bypass_file): with salt.utils.fopen(bypass_file) as bypass_file_open: if bypass_file_contains in bypass_file_open.read(): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as {} was present in {}'.format(bypass_file_contains, bypass_file)} if bypass_file is not None and bypass_file_contains is None: if os.path.isfile(bypass_file): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as bypass_file {} was present'.format(bypass_file)} # check if capabilities should be checked and modify the requested packages # accordingly. if pkgs: pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) if not isinstance(pkg_verify, list): pkg_verify = pkg_verify is True if (pkg_verify or isinstance(pkg_verify, list)) \ and 'pkg.verify' not in __salt__: return {'name': name, 'changes': {}, 'result': False, 'comment': 'pkg.verify not implemented'} if not isinstance(version, six.string_types) and version is not None: version = six.text_type(version) kwargs['allow_updates'] = allow_updates result = _find_install_targets(name, version, pkgs, sources, fromrepo=fromrepo, skip_suggestions=skip_suggestions, pkg_verify=pkg_verify, normalize=normalize, ignore_epoch=ignore_epoch, reinstall=reinstall, refresh=refresh, **kwargs) try: (desired, targets, to_unpurge, to_reinstall, altered_files, warnings, was_refreshed) = result if was_refreshed: refresh = False except ValueError: # _find_install_targets() found no targets or encountered an error # check that the hold function is available if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( 
name=name, pkgs=pkgs, sources=sources ) except (CommandExecutionError, SaltInvocationError) as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': six.text_type(exc)} if 'result' in hold_ret and not hold_ret['result']: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] for i in modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] result['changes'][i['name']] = i['changes'] for i in not_modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] for i in failed_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] return result if to_unpurge and 'lowpkg.unpurge' not in __salt__: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'lowpkg.unpurge not implemented'} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret # Remove any targets not returned by _find_install_targets if pkgs: pkgs = [dict([(x, y)]) for x, y in six.iteritems(targets)] pkgs.extend([dict([(x, y)]) for x, y in six.iteritems(to_reinstall)]) elif sources: oldsources = sources sources = [x for x in oldsources if next(iter(list(x.keys()))) in targets] sources.extend([x for x in oldsources if next(iter(list(x.keys()))) in to_reinstall]) comment = [] if __opts__['test']: if targets: if sources: summary = ', '.join(targets) else: summary = ', '.join([_get_desired_pkg(x, targets) for x in targets]) comment.append('The following packages would be ' 'installed/updated: {0}'.format(summary)) if to_unpurge: comment.append( 'The following packages would have their selection 
status ' 'changed from \'purge\' to \'install\': {0}' .format(', '.join(to_unpurge)) ) if to_reinstall: # Add a comment for each package in to_reinstall with its # pkg.verify output if reinstall: reinstall_targets = [] for reinstall_pkg in to_reinstall: if sources: reinstall_targets.append(reinstall_pkg) else: reinstall_targets.append( _get_desired_pkg(reinstall_pkg, to_reinstall) ) msg = 'The following packages would be reinstalled: ' msg += ', '.join(reinstall_targets) comment.append(msg) else: for reinstall_pkg in to_reinstall: if sources: pkgstr = reinstall_pkg else: pkgstr = _get_desired_pkg(reinstall_pkg, to_reinstall) comment.append( 'Package \'{0}\' would be reinstalled because the ' 'following files have been altered:'.format(pkgstr) ) comment.append( _nested_output(altered_files[reinstall_pkg]) ) ret = {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret changes = {'installed': {}} modified_hold = None not_modified_hold = None failed_hold = None if targets or to_reinstall: try: pkg_ret = __salt__['pkg.install'](name=None, refresh=refresh, version=version, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=pkgs, sources=sources, reinstall=bool(to_reinstall), normalize=normalize, update_holds=update_holds, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while installing ' 'package(s): {0}'.format(exc)) if warnings: ret.setdefault('warnings', []).extend(warnings) return ret if refresh: refresh = False if isinstance(pkg_ret, dict): changes['installed'].update(pkg_ret) elif isinstance(pkg_ret, six.string_types): comment.append(pkg_ret) # Code below will be looking for a dictionary. If this is a string # it means that there was an exception raised and that no packages # changed, so now that we have added this error to the comments we # set this to an empty dictionary so that the code below which # checks reinstall targets works. pkg_ret = {} if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( name=name, pkgs=desired ) except (CommandExecutionError, SaltInvocationError) as exc: comment.append(six.text_type(exc)) ret = {'name': name, 'changes': changes, 'result': False, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: if 'result' in hold_ret and not hold_ret['result']: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] if to_unpurge: changes['purge_desired'] = __salt__['lowpkg.unpurge'](*to_unpurge) # Analyze pkg.install results for packages in targets if sources: modified = [x for x in changes['installed'] if x in targets] not_modified = [x for x in desired if x not in targets 
and x not in to_reinstall] failed = [x for x in targets if x not in modified] else: if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True new_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__: new_caps = __salt__['pkg.list_provides'](**kwargs) else: new_caps = {} ok, failed = _verify_install(desired, new_pkgs, ignore_epoch=ignore_epoch, new_caps=new_caps) modified = [x for x in ok if x in targets] not_modified = [x for x in ok if x not in targets and x not in to_reinstall] failed = [x for x in failed if x in targets] # If there was nothing unpurged, just set the changes dict to the contents # of changes['installed']. if not changes.get('purge_desired'): changes = changes['installed'] if modified: if sources: summary = ', '.join(modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in modified]) if len(summary) < 20: comment.append('The following packages were installed/updated: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} installed/updated.'.format( len(modified), 's' if len(modified) > 1 else '', 'were' if len(modified) > 1 else 'was' ) ) if modified_hold: for i in modified_hold: change_name = i['name'] if change_name in changes: comment.append(i['comment']) if changes[change_name]['new']: changes[change_name]['new'] += '\n' changes[change_name]['new'] += '{0}'.format(i['changes']['new']) if changes[change_name]['old']: changes[change_name]['old'] += '\n' changes[change_name]['old'] += '{0}'.format(i['changes']['old']) else: comment.append(i['comment']) changes[change_name] = {} changes[change_name]['new'] = '{0}'.format(i['changes']['new']) # Any requested packages that were not targeted for install or reinstall if not_modified: if sources: summary = ', '.join(not_modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in not_modified]) if len(not_modified) <= 20: comment.append('The following 
packages were already installed: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} already installed'.format( len(not_modified), 's' if len(not_modified) > 1 else '', 'were' if len(not_modified) > 1 else 'was' ) ) if not_modified_hold: for i in not_modified_hold: comment.append(i['comment']) result = True if failed: if sources: summary = ', '.join(failed) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in failed]) comment.insert(0, 'The following packages failed to ' 'install/update: {0}'.format(summary)) result = False if failed_hold: for i in failed_hold: comment.append(i['comment']) result = False # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] # Rerun pkg.verify for packages in to_reinstall to determine failed modified = [] failed = [] for reinstall_pkg in to_reinstall: if reinstall: if reinstall_pkg in pkg_ret: modified.append(reinstall_pkg) else: failed.append(reinstall_pkg) elif pkg_verify: # No need to wrap this in a try/except because we would already # have caught invalid arguments earlier. 
def downloaded(name,
               version=None,
               pkgs=None,
               fromrepo=None,
               ignore_epoch=None,
               **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that the package is downloaded, and that it is the correct version
    (if specified).

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    :param str name:
        The name of the package to be downloaded. This parameter is ignored
        if "pkgs" is used. Additionally, please note that this option can
        only be used to download packages from a software repository.

    :param str version:
        Download a specific version of a package.

        .. important::
            As of version 2015.8.7, for distros which use yum/dnf, packages
            which have a version with a nonzero epoch (that is, versions
            which start with a number followed by a colon) must have the
            epoch included when specifying the version number. For example:

            .. code-block:: yaml

                vim-enhanced:
                  pkg.downloaded:
                    - version: 2:7.4.160-1.el7

            An **ignore_epoch** argument has been added to which causes the
            epoch to be disregarded when the state checks to see if the
            desired version was installed.

            You can install a specific version when using the ``pkgs``
            argument by including the version after the package:

            .. code-block:: yaml

                common_packages:
                  pkg.downloaded:
                    - pkgs:
                      - unzip
                      - dos2unix
                      - salt-minion: 2015.8.5-1.el6

    :param bool resolve_capabilities:
        Turn on resolving capabilities. This allow one to name "provides" or
        alias names for packages.

        .. versionadded:: 2018.3.0

    CLI Example:

    .. code-block:: yaml

        zsh:
          pkg.downloaded:
            - version: 5.0.5-4.63
            - fromrepo: "myrepository"
    '''
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}

    # Bail out early on platforms whose pkg provider cannot report which
    # packages are already present in the local download cache.
    if 'pkg.list_downloaded' not in __salt__:
        ret['result'] = False
        ret['comment'] = 'The pkg.downloaded state is not available on ' \
            'this platform'
        return ret

    # An explicitly empty pkgs list is treated as a successful no-op.
    if not pkgs and isinstance(pkgs, list):
        ret['result'] = True
        ret['comment'] = 'No packages to download provided'
        return ret

    # If just a name (and optionally a version) is passed, just pack them into
    # the pkgs argument.
    if name and not pkgs:
        if version:
            pkgs = [{name: version}]
            version = None
        else:
            pkgs = [name]

    # It doesn't make sense here to receive 'downloadonly' as kwargs
    # as we're explicitly passing 'downloadonly=True' to execution module.
    if 'downloadonly' in kwargs:
        del kwargs['downloadonly']

    # Map "provides"/alias names onto real package names where supported.
    pkgs, _refresh = _resolve_capabilities(pkgs, **kwargs)

    # Only downloading not yet downloaded packages
    targets = _find_download_targets(name,
                                     version,
                                     pkgs,
                                     fromrepo=fromrepo,
                                     ignore_epoch=ignore_epoch,
                                     **kwargs)
    # A dict containing a 'result' key is already a complete state return
    # (e.g. "already downloaded" or a lookup error); pass it straight through.
    if isinstance(targets, dict) and 'result' in targets:
        return targets
    elif not isinstance(targets, dict):
        ret['result'] = False
        ret['comment'] = 'An error was encountered while checking targets: ' \
            '{0}'.format(targets)
        return ret

    if __opts__['test']:
        summary = ', '.join(targets)
        ret['comment'] = 'The following packages would be ' \
            'downloaded: {0}'.format(summary)
        return ret

    try:
        pkg_ret = __salt__['pkg.install'](name=name,
                                          pkgs=pkgs,
                                          version=version,
                                          downloadonly=True,
                                          fromrepo=fromrepo,
                                          ignore_epoch=ignore_epoch,
                                          **kwargs)
        ret['result'] = True
        ret['changes'].update(pkg_ret)
    except CommandExecutionError as exc:
        ret = {'name': name, 'result': False}
        if exc.info:
            # Get information for state return from the exception.
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = 'An error was encountered while downloading ' \
                'package(s): {0}'.format(exc)
        return ret

    # Re-query the download cache and confirm every target actually landed.
    new_pkgs = __salt__['pkg.list_downloaded'](**kwargs)
    ok, failed = _verify_install(targets, new_pkgs, ignore_epoch=ignore_epoch)

    if failed:
        summary = ', '.join([_get_desired_pkg(x, targets)
                             for x in failed])
        ret['result'] = False
        ret['comment'] = 'The following packages failed to ' \
            'download: {0}'.format(summary)

    # Nothing changed and nothing to complain about: everything was already
    # in the download cache.
    if not ret['changes'] and not ret['comment']:
        ret['result'] = True
        ret['comment'] = 'Packages are already downloaded: ' \
            '{0}'.format(', '.join(targets))

    return ret
def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that packages related to certain advisory ids are installed.

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    CLI Example:

    .. code-block:: yaml

        issue-foo-fixed:
          pkg.patch_installed:
            - advisory_ids:
              - SUSE-SLE-SERVER-12-SP2-2017-185
              - SUSE-SLE-SERVER-12-SP2-2017-150
              - SUSE-SLE-SERVER-12-SP2-2017-120
    '''
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}

    # Advisory handling requires a pkg provider that can enumerate patches.
    if 'pkg.list_patches' not in __salt__:
        ret['result'] = False
        ret['comment'] = 'The pkg.patch_installed state is not available on ' \
            'this platform'
        return ret

    # An explicitly empty advisory list is treated as a successful no-op.
    if not advisory_ids and isinstance(advisory_ids, list):
        ret['result'] = True
        ret['comment'] = 'No advisory ids provided'
        return ret

    # Only downloading not yet downloaded packages
    targets = _find_advisory_targets(name, advisory_ids, **kwargs)
    # A dict containing a 'result' key is already a complete state return;
    # pass it straight through.
    if isinstance(targets, dict) and 'result' in targets:
        return targets
    elif not isinstance(targets, list):
        ret['result'] = False
        ret['comment'] = 'An error was encountered while checking targets: ' \
            '{0}'.format(targets)
        return ret

    if __opts__['test']:
        summary = ', '.join(targets)
        ret['comment'] = 'The following advisory patches would be ' \
            'downloaded: {0}'.format(summary)
        return ret

    try:
        # 'downloadonly' is forwarded so patch_downloaded() can reuse this
        # state to fetch (but not install) the advisory packages.
        pkg_ret = __salt__['pkg.install'](name=name,
                                          advisory_ids=advisory_ids,
                                          downloadonly=downloadonly,
                                          **kwargs)
        ret['result'] = True
        ret['changes'].update(pkg_ret)
    except CommandExecutionError as exc:
        ret = {'name': name, 'result': False}
        if exc.info:
            # Get information for state return from the exception.
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = ('An error was encountered while downloading '
                              'package(s): {0}'.format(exc))
        return ret

    if not ret['changes'] and not ret['comment']:
        status = 'downloaded' if downloadonly else 'installed'
        ret['result'] = True
        ret['comment'] = ('Advisory patch is not needed or related packages '
                          'are already {0}'.format(status))

    return ret
def patch_downloaded(name, advisory_ids=None, **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Make sure the packages tied to the given advisory ids have been fetched
    into the local package cache (downloaded, but not installed).

    Only available for pkg providers that can enumerate patches:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    CLI Example:

    .. code-block:: yaml

        preparing-to-fix-issues:
          pkg.patch_downloaded:
            - advisory_ids:
              - SUSE-SLE-SERVER-12-SP2-2017-185
              - SUSE-SLE-SERVER-12-SP2-2017-150
              - SUSE-SLE-SERVER-12-SP2-2017-120
    '''
    if 'pkg.list_patches' not in __salt__:
        # The provider on this platform cannot enumerate patches, so this
        # state cannot operate here.
        return {'name': name,
                'result': False,
                'changes': {},
                'comment': 'The pkg.patch_downloaded state is not available on '
                           'this platform'}

    # 'downloadonly' is forced to True below, so a caller-supplied value
    # would collide with the explicit keyword argument -- discard it.
    kwargs.pop('downloadonly', None)

    # Delegate the actual work; patch_installed() with downloadonly=True
    # fetches the advisory packages without installing them.
    return patch_installed(name=name,
                           advisory_ids=advisory_ids,
                           downloadonly=True,
                           **kwargs)
def latest(
        name,
        refresh=None,
        fromrepo=None,
        skip_verify=False,
        pkgs=None,
        watch_flags=True,
        **kwargs):
    '''
    Ensure that the named package is installed and the latest available
    package. If the package can be updated, this state function will update
    the package. Generally it is better for the
    :mod:`installed <salt.states.pkg.installed>` function to be used, as
    :mod:`latest <salt.states.pkg.latest>` will update the package whenever a
    new package is available.

    name
        The name of the package to maintain at the latest available version.
        This parameter is ignored if "pkgs" is used.

    fromrepo
        Specify a repository from which to install

    skip_verify
        Skip the GPG verification check for the package to be installed

    refresh
        This parameter controls whether or not the package repo database is
        updated prior to checking for the latest available version of the
        requested packages.

        If ``True``, the package database will be refreshed (``apt-get
        update`` or equivalent, depending on platform) before checking for
        the latest available version of the requested packages.

        If ``False``, the package database will *not* be refreshed before
        checking.

        If unset, then Salt treats package database refreshes differently
        depending on whether or not a ``pkg`` state has been executed already
        during the current Salt run. Once a refresh has been performed in a
        ``pkg`` state, for the remainder of that Salt run no other refreshes
        will be performed for ``pkg`` states which do not explicitly set
        ``refresh`` to ``True``. This prevents needless additional refreshes
        from slowing down the Salt run.

    watch_flags
        On Gentoo systems, when ``True`` (the default), a package whose
        portage USE flags have changed is still targeted for reinstall even
        if it is already at the latest version (checked via
        ``portage_config.is_changed_uses``). Has no effect on other
        distributions.

    :param str cache_valid_time:

        .. versionadded:: 2016.11.0

        This parameter sets the value in seconds after which the cache is
        marked as invalid, and a cache update is necessary. This overwrites
        the ``refresh`` parameter's default behavior.

        Example:

        .. code-block:: yaml

            httpd:
              pkg.latest:
                - refresh: True
                - cache_valid_time: 300

        In this case, a refresh will not take place for 5 minutes since the
        last ``apt-get update`` was executed on the system.

        .. note::

            This parameter is available only on Debian based distributions
            and has no effect on the rest.

    :param bool resolve_capabilities:
        Turn on resolving capabilities. This allow one to name "provides" or
        alias names for packages.

        .. versionadded:: 2018.3.0

    Multiple Package Installation Options:

    (Not yet supported for: FreeBSD, OpenBSD, MacOS, and Solaris pkgutil)

    pkgs
        A list of packages to maintain at the latest available version.

    .. code-block:: yaml

        mypkgs:
          pkg.latest:
            - pkgs:
              - foo
              - bar
              - baz

    install_recommends
        Whether to install the packages marked as recommended. Default is
        ``True``. Currently only works with APT-based systems.

        .. versionadded:: 2015.5.0

    .. code-block:: yaml

        httpd:
          pkg.latest:
            - install_recommends: False

    only_upgrade
        Only upgrade the packages, if they are already installed. Default is
        ``False``. Currently only works with APT-based systems.

        .. versionadded:: 2015.5.0

    .. code-block:: yaml

        httpd:
          pkg.latest:
            - only_upgrade: True

    .. note::
        If this parameter is set to True and the package is not already
        installed, the state will fail.

    report_reboot_exit_codes
        If the installer exits with a recognized exit code indicating that a
        reboot is required, the module function
        *win_system.set_reboot_required_witnessed* will be called, preserving
        the knowledge of this event for the remainder of the current boot
        session. For the time being, ``3010`` is the only recognized exit
        code, but this is subject to future refinement. The value of this
        param defaults to ``True``. This parameter has no effect on
        non-Windows systems.

        .. versionadded:: 2016.11.0

        .. code-block:: yaml

            ms vcpp installed:
              pkg.latest:
                - name: ms-vcpp
                - report_reboot_exit_codes: False
    '''
    # Respect the once-per-run refresh bookkeeping unless the caller forced
    # refresh one way or the other.
    refresh = salt.utils.pkg.check_refresh(__opts__, refresh)

    if kwargs.get('sources'):
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'The "sources" parameter is not supported.'}
    elif pkgs:
        desired_pkgs = list(_repack_pkgs(pkgs).keys())
        if not desired_pkgs:
            # Badly-formatted SLS
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'Invalidly formatted "pkgs" parameter. See '
                               'minion log.'}
    else:
        if not pkgs and isinstance(pkgs, list):
            # An explicitly empty pkgs list is a successful no-op.
            return {
                'name': name,
                'changes': {},
                'result': True,
                'comment': 'No packages to install provided'
            }
        else:
            desired_pkgs = [name]

    kwargs['saltenv'] = __env__

    # check if capabilities should be checked and modify the requested packages
    # accordingly.
    desired_pkgs, refresh = _resolve_capabilities(desired_pkgs,
                                                  refresh=refresh,
                                                  **kwargs)

    try:
        # avail maps pkg name -> newest available version (empty/None when the
        # package is up-to-date or unknown).
        avail = __salt__['pkg.latest_version'](*desired_pkgs,
                                               fromrepo=fromrepo,
                                               refresh=refresh,
                                               **kwargs)
    except CommandExecutionError as exc:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'An error was encountered while checking the '
                           'newest available version of package(s): {0}'
                           .format(exc)}

    try:
        # cur maps pkg name -> currently installed version.
        cur = __salt__['pkg.version'](*desired_pkgs, **kwargs)
    except CommandExecutionError as exc:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': exc.strerror}

    # Repack the cur/avail data if only a single package is being checked
    if isinstance(cur, six.string_types):
        cur = {desired_pkgs[0]: cur}
    if isinstance(avail, six.string_types):
        avail = {desired_pkgs[0]: avail}

    targets = {}
    problems = []
    for pkg in desired_pkgs:
        if not avail.get(pkg):
            # Package either a) is up-to-date, or b) does not exist
            if not cur.get(pkg):
                # Package does not exist
                msg = 'No information found for \'{0}\'.'.format(pkg)
                log.error(msg)
                problems.append(msg)
            elif watch_flags \
                    and __grains__.get('os_family') == 'Gentoo' \
                    and __salt__['portage_config.is_changed_uses'](pkg):
                # Package is up-to-date, but Gentoo USE flags are changing so
                # we need to add it to the targets
                targets[pkg] = cur[pkg]
        else:
            # Package either a) is not installed, or b) is installed and has an
            # upgrade available
            targets[pkg] = avail[pkg]

    if problems:
        return {
            'name': name,
            'changes': {},
            'result': False,
            'comment': ' '.join(problems)
        }

    if targets:
        # Find up-to-date packages
        if not pkgs:
            # There couldn't have been any up-to-date packages if this state
            # only targeted a single package and is being allowed to proceed to
            # the install step.
            up_to_date = []
        else:
            up_to_date = [x for x in pkgs if x not in targets]

        if __opts__['test']:
            comments = []
            comments.append(
                'The following packages would be installed/upgraded: ' +
                ', '.join(sorted(targets))
            )
            if up_to_date:
                up_to_date_count = len(up_to_date)
                if up_to_date_count <= 10:
                    comments.append(
                        'The following packages are already up-to-date: ' +
                        ', '.join(
                            ['{0} ({1})'.format(x, cur[x])
                             for x in sorted(up_to_date)]
                        )
                    )
                else:
                    comments.append(
                        '{0} packages are already up-to-date'
                        .format(up_to_date_count)
                    )
            return {'name': name,
                    'changes': {},
                    'result': None,
                    'comment': '\n'.join(comments)}

        if salt.utils.platform.is_windows():
            # pkg.install execution module on windows ensures the software
            # package is installed when no version is specified, it does not
            # upgrade the software to the latest. This is per the design.
            # Build updated list of pkgs *with version number*, exclude
            # non-targeted ones
            targeted_pkgs = [{x: targets[x]} for x in targets]
        else:
            # Build updated list of pkgs to exclude non-targeted ones
            targeted_pkgs = list(targets)

        # No need to refresh, if a refresh was necessary it would have been
        # performed above when pkg.latest_version was run.
        try:
            changes = __salt__['pkg.install'](name=None,
                                              refresh=False,
                                              fromrepo=fromrepo,
                                              skip_verify=skip_verify,
                                              pkgs=targeted_pkgs,
                                              **kwargs)
        except CommandExecutionError as exc:
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'An error was encountered while installing '
                               'package(s): {0}'.format(exc)}

        if changes:
            # Find failed and successful updates. Note the precedence: a
            # target fails when it has no change entry at all, or when its
            # 'new' version differs from the target and the target was not
            # the literal string 'latest'.
            failed = [x for x in targets
                      if not changes.get(x) or
                      changes[x].get('new') != targets[x] and
                      targets[x] != 'latest']
            successful = [x for x in targets if x not in failed]

            comments = []
            if failed:
                msg = 'The following packages failed to update: ' \
                      '{0}'.format(', '.join(sorted(failed)))
                comments.append(msg)
            if successful:
                msg = 'The following packages were successfully ' \
                      'installed/upgraded: ' \
                      '{0}'.format(', '.join(sorted(successful)))
                comments.append(msg)
            if up_to_date:
                if len(up_to_date) <= 10:
                    msg = 'The following packages were already up-to-date: ' \
                          '{0}'.format(', '.join(sorted(up_to_date)))
                else:
                    msg = '{0} packages were already up-to-date '.format(
                        len(up_to_date))
                comments.append(msg)

            return {'name': name,
                    'changes': changes,
                    'result': False if failed else True,
                    'comment': ' '.join(comments)}
        else:
            # pkg.install reported no changes at all: every target failed.
            if len(targets) > 10:
                comment = ('{0} targeted packages failed to update. '
                           'See debug log for details.'.format(len(targets)))
            elif len(targets) > 1:
                comment = ('The following targeted packages failed to update. '
                           'See debug log for details: ({0}).'
                           .format(', '.join(sorted(targets))))
            else:
                comment = 'Package {0} failed to ' \
                          'update.'.format(next(iter(list(targets.keys()))))
            if up_to_date:
                if len(up_to_date) <= 10:
                    comment += ' The following packages were already ' \
                               'up-to-date: ' \
                               '{0}'.format(', '.join(sorted(up_to_date)))
                else:
                    comment += '{0} packages were already ' \
                               'up-to-date'.format(len(up_to_date))

            return {'name': name,
                    'changes': changes,
                    'result': False,
                    'comment': comment}
    else:
        # No targets at all: everything requested is already up-to-date.
        if len(desired_pkgs) > 10:
            comment = 'All {0} packages are up-to-date.'.format(
                len(desired_pkgs))
        elif len(desired_pkgs) > 1:
            comment = 'All packages are up-to-date ' \
                      '({0}).'.format(', '.join(sorted(desired_pkgs)))
        else:
            comment = 'Package {0} is already ' \
                      'up-to-date'.format(desired_pkgs[0])

        return {'name': name,
                'changes': {},
                'result': True,
                'comment': comment}
def _uninstall(
        action='remove',
        name=None,
        version=None,
        pkgs=None,
        normalize=True,
        ignore_epoch=False,
        **kwargs):
    '''
    Common function for package removal.

    Backend shared by the ``removed`` and ``purged`` states. ``action``
    selects which execution function is invoked (``pkg.remove`` or
    ``pkg.purge``); any other value is rejected as a bug. For ``purge``,
    packages that were previously removed but still have residual
    config/state (as reported by ``pkg.list_pkgs(removed=True)``) are also
    targeted, and their residue is re-checked afterwards.
    '''
    if action not in ('remove', 'purge'):
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'Invalid action \'{0}\'. '
                           'This is probably a bug.'.format(action)}

    try:
        # pkg_params maps package name -> requested version (or None).
        pkg_params = __salt__['pkg_resource.parse_targets'](
            name, pkgs, normalize=normalize)[0]
    except MinionError as exc:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'An error was encountered while parsing targets: '
                           '{0}'.format(exc)}
    targets = _find_remove_targets(name, version, pkgs, normalize,
                                   ignore_epoch=ignore_epoch, **kwargs)
    # A dict containing a 'result' key is already a complete state return;
    # pass it straight through.
    if isinstance(targets, dict) and 'result' in targets:
        return targets
    elif not isinstance(targets, list):
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'An error was encountered while checking targets: '
                           '{0}'.format(targets)}
    if action == 'purge':
        # Also purge packages that are gone but left residual config behind.
        old_removed = __salt__['pkg.list_pkgs'](versions_as_list=True,
                                                removed=True,
                                                **kwargs)
        targets.extend([x for x in pkg_params if x in old_removed])
    targets.sort()

    if not targets:
        return {'name': name,
                'changes': {},
                'result': True,
                'comment': 'None of the targeted packages are installed'
                           '{0}'.format(' or partially installed'
                                        if action == 'purge' else '')}

    if __opts__['test']:
        return {'name': name,
                'changes': {},
                'result': None,
                'comment': 'The following packages will be {0}d: '
                           '{1}.'.format(action, ', '.join(targets))}

    # Dispatch to pkg.remove or pkg.purge based on the requested action.
    changes = __salt__['pkg.{0}'.format(action)](name,
                                                 pkgs=pkgs,
                                                 version=version,
                                                 **kwargs)
    new = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
    failed = []
    for x in pkg_params:
        if __grains__['os_family'] in ['Suse', 'RedHat']:
            # Check if the package version set to be removed is actually removed:
            if x in new and not pkg_params[x]:
                failed.append(x)
            elif x in new and pkg_params[x] in new[x]:
                failed.append(x + "-" + pkg_params[x])
        elif x in new:
            failed.append(x)

    if action == 'purge':
        # Any package still listed as "removed" kept its residue: purge failed.
        new_removed = __salt__['pkg.list_pkgs'](versions_as_list=True,
                                                removed=True,
                                                **kwargs)
        failed.extend([x for x in pkg_params if x in new_removed])
    failed.sort()

    if failed:
        return {'name': name,
                'changes': changes,
                'result': False,
                'comment': 'The following packages failed to {0}: '
                           '{1}.'.format(action, ', '.join(failed))}

    comments = []
    not_installed = sorted([x for x in pkg_params if x not in targets])
    if not_installed:
        comments.append('The following packages were not installed: '
                        '{0}'.format(', '.join(not_installed)))
        comments.append('The following packages were {0}d: '
                        '{1}.'.format(action, ', '.join(targets)))
    else:
        comments.append('All targeted packages were {0}d.'.format(action))

    return {'name': name,
            'changes': changes,
            'result': True,
            'comment': ' '.join(comments)}
'.join(failed))} comments = [] not_installed = sorted([x for x in pkg_params if x not in targets]) if not_installed: comments.append('The following packages were not installed: ' '{0}'.format(', '.join(not_installed))) comments.append('The following packages were {0}d: ' '{1}.'.format(action, ', '.join(targets))) else: comments.append('All targeted packages were {0}d.'.format(action)) return {'name': name, 'changes': changes, 'result': True, 'comment': ' '.join(comments)} def removed(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.remove`` if necessary to remove the package. name The name of the package to be removed. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.removed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to remove. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='remove', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while removing ' 'package(s): {0}'.format(exc)) return ret def purged(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.purge`` if necessary to purge the package. 
All configuration files are also removed. name The name of the package to be purged. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. 
code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.purged: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to purge. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='purge', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while purging ' 'package(s): {0}'.format(exc)) return ret def uptodate(name, refresh=False, pkgs=None, **kwargs): ''' .. versionadded:: 2014.7.0 .. versionchanged:: 2018.3.0 Added support for the ``pkgin`` provider. Verify that the system is completely up to date. name The name has no functional value and is only used as a tracking reference refresh refresh the package database before checking for new upgrades pkgs list of packages to upgrade :param str cache_valid_time: This parameter sets the value in seconds after which cache marked as invalid, and cache update is necessary. This overwrite ``refresh`` parameter default behavior. In this case cache_valid_time is set, refresh will not take place for amount in seconds since last ``apt-get update`` executed on the system. .. 
note:: This parameter available only on Debian based distributions, and have no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 kwargs Any keyword arguments to pass through to ``pkg.upgrade``. .. versionadded:: 2015.5.0 ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'Failed to update'} if 'pkg.list_upgrades' not in __salt__: ret['comment'] = 'State pkg.uptodate is not available' return ret # emerge --update doesn't appear to support repo notation if 'fromrepo' in kwargs and __grains__['os_family'] == 'Gentoo': ret['comment'] = '\'fromrepo\' argument not supported on this platform' return ret if isinstance(refresh, bool): pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) try: packages = __salt__['pkg.list_upgrades'](refresh=refresh, **kwargs) expected = {pkgname: {'new': pkgver, 'old': __salt__['pkg.version'](pkgname, **kwargs)} for pkgname, pkgver in six.iteritems(packages)} if isinstance(pkgs, list): packages = [pkg for pkg in packages if pkg in pkgs] expected = {pkgname: pkgver for pkgname, pkgver in six.iteritems(expected) if pkgname in pkgs} except Exception as exc: ret['comment'] = six.text_type(exc) return ret else: ret['comment'] = 'refresh must be either True or False' return ret if not packages: ret['comment'] = 'System is already up-to-date' ret['result'] = True return ret elif __opts__['test']: ret['comment'] = 'System update will be performed' ret['changes'] = expected ret['result'] = None return ret try: ret['changes'] = __salt__['pkg.upgrade'](refresh=refresh, pkgs=pkgs, **kwargs) except CommandExecutionError as exc: if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while updating ' 'packages: {0}'.format(exc)) return ret # If a package list was provided, ensure those packages were updated missing = [] if isinstance(pkgs, list): missing = [pkg for pkg in six.iterkeys(expected) if pkg not in ret['changes']] if missing: ret['comment'] = 'The following package(s) failed to update: {0}'.format(', '.join(missing)) ret['result'] = False else: ret['comment'] = 'Upgrade ran successfully' ret['result'] = True return ret def group_installed(name, skip=None, include=None, **kwargs): ''' .. versionadded:: 2015.8.0 .. versionchanged:: 2016.11.0 Added support in :mod:`pacman <salt.modules.pacman>` Ensure that an entire package group is installed. This state is currently only supported for the :mod:`yum <salt.modules.yumpkg>` and :mod:`pacman <salt.modules.pacman>` package managers. skip Packages that would normally be installed by the package group ("default" packages), which should not be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - skip: - piranha include Packages which are included in a group, which would not normally be installed by a ``yum groupinstall`` ("optional" packages). Note that this will not enforce group membership; if you include packages which are not members of the specified groups, they will still be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - include: - haproxy .. versionchanged:: 2016.3.0 This option can no longer be passed as a comma-separated list, it must now be passed as a list (as shown in the above example). .. note:: Because this is essentially a wrapper around :py:func:`pkg.install <salt.modules.yumpkg.install>`, any argument which can be passed to pkg.install may also be included here, and it will be passed on to the call to :py:func:`pkg.install <salt.modules.yumpkg.install>`. 
''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} if 'pkg.group_diff' not in __salt__: ret['comment'] = 'pkg.group_install not available for this platform' return ret if skip is None: skip = [] else: if not isinstance(skip, list): ret['comment'] = 'skip must be formatted as a list' return ret for idx, item in enumerate(skip): if not isinstance(item, six.string_types): skip[idx] = six.text_type(item) if include is None: include = [] else: if not isinstance(include, list): ret['comment'] = 'include must be formatted as a list' return ret for idx, item in enumerate(include): if not isinstance(item, six.string_types): include[idx] = six.text_type(item) try: diff = __salt__['pkg.group_diff'](name) except CommandExecutionError as err: ret['comment'] = ('An error was encountered while installing/updating ' 'group \'{0}\': {1}.'.format(name, err)) return ret mandatory = diff['mandatory']['installed'] + \ diff['mandatory']['not installed'] invalid_skip = [x for x in mandatory if x in skip] if invalid_skip: ret['comment'] = ( 'The following mandatory packages cannot be skipped: {0}' .format(', '.join(invalid_skip)) ) return ret targets = diff['mandatory']['not installed'] targets.extend([x for x in diff['default']['not installed'] if x not in skip]) targets.extend(include) if not targets: ret['result'] = True ret['comment'] = 'Group \'{0}\' is already installed'.format(name) return ret partially_installed = diff['mandatory']['installed'] \ or diff['default']['installed'] \ or diff['optional']['installed'] if __opts__['test']: ret['result'] = None if partially_installed: ret['comment'] = ( 'Group \'{0}\' is partially installed and will be updated' .format(name) ) else: ret['comment'] = 'Group \'{0}\' will be installed'.format(name) return ret try: ret['changes'] = __salt__['pkg.install'](pkgs=targets, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while ' 'installing/updating group \'{0}\': {1}' .format(name, exc)) return ret failed = [x for x in targets if x not in __salt__['pkg.list_pkgs'](**kwargs)] if failed: ret['comment'] = ( 'Failed to install the following packages: {0}' .format(', '.join(failed)) ) return ret ret['result'] = True ret['comment'] = 'Group \'{0}\' was {1}'.format( name, 'updated' if partially_installed else 'installed' ) return ret def mod_init(low): ''' Set a flag to tell the install functions to refresh the package database. This ensures that the package database is refreshed only once during a state run significantly improving the speed of package management during a state run. It sets a flag for a number of reasons, primarily due to timeline logic. When originally setting up the mod_init for pkg a number of corner cases arose with different package managers and how they refresh package data. It also runs the "ex_mod_init" from the package manager module that is currently loaded. The "ex_mod_init" is expected to work as a normal "mod_init" function. .. 
seealso:: :py:func:`salt.modules.ebuild.ex_mod_init` ''' ret = True if 'pkg.ex_mod_init' in __salt__: ret = __salt__['pkg.ex_mod_init'](low) if low['fun'] == 'installed' or low['fun'] == 'latest': salt.utils.pkg.write_rtag(__opts__) return ret return False def mod_aggregate(low, chunks, running): ''' The mod_aggregate function which looks up all packages in the available low chunks and merges them into a single pkgs ref in the present low data ''' pkgs = [] pkg_type = None agg_enabled = [ 'installed', 'latest', 'removed', 'purged', ] if low.get('fun') not in agg_enabled: return low for chunk in chunks: tag = __utils__['state.gen_tag'](chunk) if tag in running: # Already ran the pkg state, skip aggregation continue if chunk.get('state') == 'pkg': if '__agg__' in chunk: continue # Check for the same function if chunk.get('fun') != low.get('fun'): continue # Check for the same repo if chunk.get('fromrepo') != low.get('fromrepo'): continue # Check first if 'sources' was passed so we don't aggregate pkgs # and sources together. if 'sources' in chunk: if pkg_type is None: pkg_type = 'sources' if pkg_type == 'sources': pkgs.extend(chunk['sources']) chunk['__agg__'] = True else: if pkg_type is None: pkg_type = 'pkgs' if pkg_type == 'pkgs': # Pull out the pkg names! if 'pkgs' in chunk: pkgs.extend(chunk['pkgs']) chunk['__agg__'] = True elif 'name' in chunk: version = chunk.pop('version', None) if version is not None: pkgs.append({chunk['name']: version}) else: pkgs.append(chunk['name']) chunk['__agg__'] = True if pkg_type is not None and pkgs: if pkg_type in low: low[pkg_type].extend(pkgs) else: low[pkg_type] = pkgs return low def mod_watch(name, **kwargs): ''' Install/reinstall a package based on a watch requisite .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered. 
''' sfun = kwargs.pop('sfun', None) mapfun = {'purged': purged, 'latest': latest, 'removed': removed, 'installed': installed} if sfun in mapfun: return mapfun[sfun](name, **kwargs) return {'name': name, 'changes': {}, 'comment': 'pkg.{0} does not work with the watch requisite'.format(sfun), 'result': False}
saltstack/salt
salt/states/pkg.py
_verify_install
python
def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None): ''' Determine whether or not the installed packages match what was requested in the SLS file. ''' ok = [] failed = [] if not new_caps: new_caps = dict() for pkgname, pkgver in desired.items(): # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names. # Homebrew for Mac OSX does something similar with tap names # prefixing package names, separated with a slash. has_origin = '/' in pkgname if __grains__['os'] == 'FreeBSD' and has_origin: cver = [k for k, v in six.iteritems(new_pkgs) if v['origin'] == pkgname] elif __grains__['os'] == 'MacOS' and has_origin: cver = new_pkgs.get(pkgname, new_pkgs.get(pkgname.split('/')[-1])) elif __grains__['os'] == 'OpenBSD': cver = new_pkgs.get(pkgname.split('%')[0]) elif __grains__['os_family'] == 'Debian': cver = new_pkgs.get(pkgname.split('=')[0]) else: cver = new_pkgs.get(pkgname) if not cver and pkgname in new_caps: cver = new_pkgs.get(new_caps.get(pkgname)[0]) if not cver: failed.append(pkgname) continue elif pkgver == 'latest': ok.append(pkgname) continue elif not __salt__['pkg_resource.version_clean'](pkgver): ok.append(pkgname) continue elif pkgver.endswith("*") and cver[0].startswith(pkgver[:-1]): ok.append(pkgname) continue if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch): ok.append(pkgname) else: failed.append(pkgname) return ok, failed
Determine whether or not the installed packages match what was requested in the SLS file.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pkg.py#L840-L884
null
# -*- coding: utf-8 -*- ''' Installation of packages using OS package managers such as yum or apt-get ========================================================================= .. note:: On minions running systemd>=205, as of version 2015.8.12, 2016.3.3, and 2016.11.0, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing the package manager commands spawned by Salt, when Salt updates itself (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.use_scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Salt can manage software packages via the pkg state module, packages can be set up to be installed, latest, removed and purged. Package management declarations are typically rather simple: .. code-block:: yaml vim: pkg.installed A more involved example involves pulling from a custom repository. .. code-block:: yaml base: pkgrepo.managed: - humanname: Logstash PPA - name: ppa:wolfnet/logstash - dist: precise - file: /etc/apt/sources.list.d/logstash.list - keyid: 28B04E4A - keyserver: keyserver.ubuntu.com logstash: pkg.installed: - fromrepo: ppa:wolfnet/logstash Multiple packages can also be installed with the use of the pkgs state module .. code-block:: yaml dotdeb.repo: pkgrepo.managed: - humanname: Dotdeb - name: deb http://packages.dotdeb.org wheezy-php55 all - dist: wheezy-php55 - file: /etc/apt/sources.list.d/dotbeb.list - keyid: 89DF5277 - keyserver: keys.gnupg.net - refresh_db: true php.packages: pkg.installed: - fromrepo: wheezy-php55 - pkgs: - php5-fpm - php5-cli - php5-curl .. 
warning:: Package names are currently case-sensitive. If the minion is using a package manager which is not case-sensitive (such as :mod:`pkgng <salt.modules.pkgng>`), then this state will fail if the proper case is not used. This will be addressed in a future release of Salt. ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import fnmatch import logging import os import re # Import Salt libs import salt.utils.pkg import salt.utils.platform import salt.utils.versions from salt.output import nested from salt.utils.functools import namespaced_function as _namespaced_function from salt.utils.odict import OrderedDict as _OrderedDict from salt.exceptions import ( CommandExecutionError, MinionError, SaltInvocationError ) from salt.modules.pkg_resource import _repack_pkgs # Import 3rd-party libs from salt.ext import six # pylint: disable=invalid-name _repack_pkgs = _namespaced_function(_repack_pkgs, globals()) if salt.utils.platform.is_windows(): # pylint: disable=import-error,no-name-in-module,unused-import from salt.ext.six.moves.urllib.parse import urlparse as _urlparse from salt.exceptions import SaltRenderError import collections import datetime import errno import time from functools import cmp_to_key # pylint: disable=import-error # pylint: enable=unused-import from salt.modules.win_pkg import _get_package_info from salt.modules.win_pkg import get_repo_data from salt.modules.win_pkg import _get_repo_details from salt.modules.win_pkg import _refresh_db_conditional from salt.modules.win_pkg import refresh_db from salt.modules.win_pkg import genrepo from salt.modules.win_pkg import _repo_process_pkg_sls from salt.modules.win_pkg import _get_latest_pkg_version from salt.modules.win_pkg import _reverse_cmp_pkg_versions _get_package_info = _namespaced_function(_get_package_info, globals()) get_repo_data = _namespaced_function(get_repo_data, globals()) _get_repo_details = \ _namespaced_function(_get_repo_details, globals()) 
_refresh_db_conditional = \ _namespaced_function(_refresh_db_conditional, globals()) refresh_db = _namespaced_function(refresh_db, globals()) genrepo = _namespaced_function(genrepo, globals()) _repo_process_pkg_sls = \ _namespaced_function(_repo_process_pkg_sls, globals()) _get_latest_pkg_version = \ _namespaced_function(_get_latest_pkg_version, globals()) _reverse_cmp_pkg_versions = \ _namespaced_function(_reverse_cmp_pkg_versions, globals()) # The following imports are used by the namespaced win_pkg funcs # and need to be included in their globals. # pylint: disable=import-error,unused-import from salt.utils.versions import LooseVersion # pylint: enable=import-error,unused-import # pylint: enable=invalid-name log = logging.getLogger(__name__) def __virtual__(): ''' Only make these states available if a pkg provider has been detected or assigned for this minion ''' return 'pkg.install' in __salt__ def _get_comparison_spec(pkgver): ''' Return a tuple containing the comparison operator and the version. If no comparison operator was passed, the comparison is assumed to be an "equals" comparison, and "==" will be the operator returned. ''' oper, verstr = salt.utils.pkg.split_comparison(pkgver.strip()) if oper in ('=', ''): oper = '==' return oper, verstr def _parse_version_string(version_conditions_string): ''' Returns a list of two-tuples containing (operator, version). ''' result = [] version_conditions_string = version_conditions_string.strip() if not version_conditions_string: return result for version_condition in version_conditions_string.split(','): operator_and_version = _get_comparison_spec(version_condition) result.append(operator_and_version) return result def _fulfills_version_string(installed_versions, version_conditions_string, ignore_epoch=False, allow_updates=False): ''' Returns True if any of the installed versions match the specified version conditions, otherwise returns False. 
installed_versions The installed versions version_conditions_string The string containing all version conditions. E.G. 1.2.3-4 >=1.2.3-4 >=1.2.3-4, <2.3.4-5 >=1.2.3-4, <2.3.4-5, !=1.2.4-1 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. allow_updates : False Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. (Only applicable if only one strict version condition is specified E.G. version: 2.0.6~ubuntu3) ''' version_conditions = _parse_version_string(version_conditions_string) for installed_version in installed_versions: fullfills_all = True for operator, version_string in version_conditions: if allow_updates and len(version_conditions) == 1 and operator == '==': operator = '>=' fullfills_all = fullfills_all and _fulfills_version_spec([installed_version], operator, version_string, ignore_epoch=ignore_epoch) if fullfills_all: return True return False def _fulfills_version_spec(versions, oper, desired_version, ignore_epoch=False): ''' Returns True if any of the installed versions match the specified version, otherwise returns False ''' cmp_func = __salt__.get('pkg.version_cmp') # stripping "with_origin" dict wrapper if salt.utils.platform.is_freebsd(): if isinstance(versions, dict) and 'version' in versions: versions = versions['version'] for ver in versions: if (oper == '==' and fnmatch.fnmatch(ver, desired_version)) \ or salt.utils.versions.compare(ver1=ver, oper=oper, ver2=desired_version, cmp_func=cmp_func, ignore_epoch=ignore_epoch): return True return False def _find_unpurge_targets(desired, **kwargs): ''' Find packages which are marked to be purged but can't yet be removed because they are dependencies for 
other installed packages. These are the packages which will need to be 'unpurged' because they are part of pkg.installed states. This really just applies to Debian-based Linuxes. ''' return [ x for x in desired if x in __salt__['pkg.list_pkgs'](purge_desired=True, **kwargs) ] def _find_download_targets(name=None, version=None, pkgs=None, normalize=True, skip_suggestions=False, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.downloaded and discover what packages need to be downloaded. Return a dict of packages to download. ''' cur_pkgs = __salt__['pkg.list_downloaded'](**kwargs) if pkgs: to_download = _repack_pkgs(pkgs, normalize=normalize) if not to_download: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. See ' 'minion log.'} else: if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_download = {_normalize_name(name): version} else: to_download = {name: version} cver = cur_pkgs.get(name, {}) if name in to_download: # Package already downloaded, no need to download again if cver and version in cver: return {'name': name, 'changes': {}, 'result': True, 'comment': 'Version {0} of package \'{1}\' is already ' 'downloaded'.format(version, name)} # if cver is not an empty string, the package is already downloaded elif cver and version is None: # The package is downloaded return {'name': name, 'changes': {}, 'result': True, 'comment': 'Package {0} is already ' 'downloaded'.format(name)} version_spec = False if not skip_suggestions: try: problems = _preflight_check(to_download, **kwargs) except CommandExecutionError: pass else: comments = [] if problems.get('no_suggest'): comments.append( 'The following package(s) were not found, and no ' 'possible matches were found in the package db: ' '{0}'.format( ', '.join(sorted(problems['no_suggest'])) ) ) if problems.get('suggest'): for pkgname, suggestions in \ six.iteritems(problems['suggest']): 
comments.append( 'Package \'{0}\' not found (possible matches: ' '{1})'.format(pkgname, ', '.join(suggestions)) ) if comments: if len(comments) > 1: comments.append('') return {'name': name, 'changes': {}, 'result': False, 'comment': '. '.join(comments).rstrip()} # Find out which packages will be targeted in the call to pkg.download # Check current downloaded versions against specified versions targets = {} problems = [] for pkgname, pkgver in six.iteritems(to_download): cver = cur_pkgs.get(pkgname, {}) # Package not yet downloaded, so add to targets if not cver: targets[pkgname] = pkgver continue # No version specified but package is already downloaded elif cver and not pkgver: continue version_spec = True try: if not _fulfills_version_string(cver.keys(), pkgver, ignore_epoch=ignore_epoch): targets[pkgname] = pkgver except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already downloaded msg = ( 'All specified packages{0} are already downloaded' .format(' (matching specified versions)' if version_spec else '') ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_advisory_targets(name=None, advisory_ids=None, **kwargs): ''' Inspect the arguments to pkg.patch_installed and discover what advisory patches need to be installed. Return a dict of advisory patches to install. 
''' cur_patches = __salt__['pkg.list_installed_patches'](**kwargs) if advisory_ids: to_download = advisory_ids else: to_download = [name] if cur_patches.get(name, {}): # Advisory patch already installed, no need to install it again return {'name': name, 'changes': {}, 'result': True, 'comment': 'Advisory patch {0} is already ' 'installed'.format(name)} # Find out which advisory patches will be targeted in the call to pkg.install targets = [] for patch_name in to_download: cver = cur_patches.get(patch_name, {}) # Advisory patch not yet installed, so add to targets if not cver: targets.append(patch_name) continue if not targets: # All specified packages are already downloaded msg = ('All specified advisory patches are already installed') return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_remove_targets(name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.removed and discover what packages need to be removed. Return a dict of packages to remove. ''' if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if pkgs: to_remove = _repack_pkgs(pkgs, normalize=normalize) if not to_remove: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. 
See ' 'minion log.'} else: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_remove = {_normalize_name(name): version} version_spec = False # Find out which packages will be targeted in the call to pkg.remove # Check current versions against specified versions targets = [] problems = [] for pkgname, pkgver in six.iteritems(to_remove): # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names origin = bool(re.search('/', pkgname)) if __grains__['os'] == 'FreeBSD' and origin: cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == pkgname] else: cver = cur_pkgs.get(pkgname, []) # Package not installed, no need to remove if not cver: continue # No version specified and pkg is installed elif __salt__['pkg_resource.version_clean'](pkgver) is None: targets.append(pkgname) continue version_spec = True try: if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch): targets.append(pkgname) else: log.debug( 'Current version (%s) did not match desired version ' 'specification (%s), will not remove', cver, pkgver ) except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already absent msg = 'All specified packages{0} are already absent'.format( ' (matching specified versions)' if version_spec else '' ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_install_targets(name=None, version=None, pkgs=None, sources=None, skip_suggestions=False, pkg_verify=False, normalize=True, ignore_epoch=False, reinstall=False, refresh=False, **kwargs): ''' Inspect the arguments to pkg.installed and discover what packages need to be installed. 
    Return a dict of desired packages

    On success this returns the tuple::

        (desired, targets, to_unpurge, to_reinstall, altered_files,
         warnings, was_refreshed)

    while error and no-op paths return a full state return dict instead, so
    callers must check the return type.
    '''
    was_refreshed = False

    # `pkgs` (repo installs) and `sources` (explicit package files) are
    # mutually exclusive ways of naming packages.
    if all((pkgs, sources)):
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'Only one of "pkgs" and "sources" is permitted.'}

    # dict for packages that fail pkg.verify and their altered files
    altered_files = {}
    # Get the ignore_types list if any from the pkg_verify argument
    if isinstance(pkg_verify, list) \
            and any(x.get('ignore_types') is not None
                    for x in pkg_verify
                    if isinstance(x, _OrderedDict)
                    and 'ignore_types' in x):
        ignore_types = next(x.get('ignore_types')
                            for x in pkg_verify
                            if 'ignore_types' in x)
    else:
        ignore_types = []

    # Get the verify_options list if any from the pkg_verify argument
    if isinstance(pkg_verify, list) \
            and any(x.get('verify_options') is not None
                    for x in pkg_verify
                    if isinstance(x, _OrderedDict)
                    and 'verify_options' in x):
        verify_options = next(x.get('verify_options')
                              for x in pkg_verify
                              if 'verify_options' in x)
    else:
        verify_options = []

    if __grains__['os'] == 'FreeBSD':
        kwargs['with_origin'] = True

    if salt.utils.platform.is_windows():
        # Windows requires a refresh to establish a pkg db if refresh=True, so
        # add it to the kwargs.
        kwargs['refresh'] = refresh

    # Capability resolution is opt-in and only possible when the provider
    # implements pkg.list_provides.
    resolve_capabilities = kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__
    try:
        cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
        cur_prov = resolve_capabilities and __salt__['pkg.list_provides'](**kwargs) or dict()
    except CommandExecutionError as exc:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': exc.strerror}

    if salt.utils.platform.is_windows() and kwargs.pop('refresh', False):
        # We already refreshed when we called pkg.list_pkgs
        was_refreshed = True
        refresh = False

    if any((pkgs, sources)):
        if pkgs:
            desired = _repack_pkgs(pkgs, normalize=normalize)
        elif sources:
            desired = __salt__['pkg_resource.pack_sources'](
                sources,
                normalize=normalize,
            )

        if not desired:
            # Badly-formatted SLS
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'Invalidly formatted \'{0}\' parameter. See '
                               'minion log.'.format('pkgs' if pkgs
                                                    else 'sources')}
        to_unpurge = _find_unpurge_targets(desired, **kwargs)
    else:
        if salt.utils.platform.is_windows():
            pkginfo = _get_package_info(name, saltenv=kwargs['saltenv'])
            if not pkginfo:
                return {'name': name,
                        'changes': {},
                        'result': False,
                        'comment': 'Package {0} not found in the '
                                   'repository.'.format(name)}
            if version is None:
                version = _get_latest_pkg_version(pkginfo)

        if normalize:
            _normalize_name = \
                __salt__.get('pkg.normalize_name', lambda pkgname: pkgname)
            desired = {_normalize_name(name): version}
        else:
            desired = {name: version}

        to_unpurge = _find_unpurge_targets(desired, **kwargs)

        # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names
        origin = bool(re.search('/', name))

        if __grains__['os'] == 'FreeBSD' and origin:
            cver = [k for k, v in six.iteritems(cur_pkgs)
                    if v['origin'] == name]
        else:
            cver = cur_pkgs.get(name, [])

        if name not in to_unpurge:
            if version and version in cver \
                    and not reinstall \
                    and not pkg_verify:
                # The package is installed and is the correct version
                return {'name': name,
                        'changes': {},
                        'result': True,
                        'comment': 'Version {0} of package \'{1}\' is already '
                                   'installed'.format(version, name)}

            # if cver is not an empty string, the package is already installed
            elif cver and version is None \
                    and not reinstall \
                    and not pkg_verify:
                # The package is installed
                return {'name': name,
                        'changes': {},
                        'result': True,
                        'comment': 'Package {0} is already '
                                   'installed'.format(name)}

    version_spec = False
    if not sources:
        # Check for alternate package names if strict processing is not
        # enforced. Takes extra time. Disable for improved performance
        if not skip_suggestions:
            # Perform platform-specific pre-flight checks
            not_installed = dict([
                (name, version)
                for name, version in desired.items()
                if not (name in cur_pkgs and
                        (version is None or
                         _fulfills_version_string(cur_pkgs[name], version)))
            ])
            if not_installed:
                try:
                    problems = _preflight_check(not_installed, **kwargs)
                except CommandExecutionError:
                    # Best-effort check; a failure here must not abort the
                    # state, so fall through and let pkg.install report it.
                    pass
                else:
                    comments = []
                    if problems.get('no_suggest'):
                        comments.append(
                            'The following package(s) were not found, and no '
                            'possible matches were found in the package db: '
                            '{0}'.format(
                                ', '.join(sorted(problems['no_suggest']))
                            )
                        )
                    if problems.get('suggest'):
                        for pkgname, suggestions in \
                                six.iteritems(problems['suggest']):
                            comments.append(
                                'Package \'{0}\' not found (possible matches: '
                                '{1})'.format(pkgname, ', '.join(suggestions))
                            )
                    if comments:
                        if len(comments) > 1:
                            comments.append('')
                        return {'name': name,
                                'changes': {},
                                'result': False,
                                'comment': '. '.join(comments).rstrip()}

    # Resolve the latest package version for any packages with "latest" in the
    # package version
    wants_latest = [] \
        if sources \
        else [x for x, y in six.iteritems(desired) if y == 'latest']
    if wants_latest:
        resolved_latest = __salt__['pkg.latest_version'](*wants_latest,
                                                         refresh=refresh,
                                                         **kwargs)
        if len(wants_latest) == 1:
            resolved_latest = {wants_latest[0]: resolved_latest}
        if refresh:
            was_refreshed = True
            refresh = False

        # pkg.latest_version returns an empty string when the package is
        # up-to-date. So check the currently-installed packages. If found, the
        # resolved latest version will be the currently installed one from
        # cur_pkgs. If not found, then the package doesn't exist and the
        # resolved latest version will be None.
        for key in resolved_latest:
            if not resolved_latest[key]:
                if key in cur_pkgs:
                    resolved_latest[key] = cur_pkgs[key][-1]
                else:
                    resolved_latest[key] = None
        # Update the desired versions with the ones we resolved
        desired.update(resolved_latest)

    # Find out which packages will be targeted in the call to pkg.install
    targets = {}
    to_reinstall = {}
    problems = []
    warnings = []
    failed_verify = False
    for package_name, version_string in six.iteritems(desired):
        cver = cur_pkgs.get(package_name, [])
        if resolve_capabilities and not cver and package_name in cur_prov:
            # The desired name is a capability; check the package that
            # provides it instead.
            cver = cur_pkgs.get(cur_prov.get(package_name)[0], [])

        # Package not yet installed, so add to targets
        if not cver:
            targets[package_name] = version_string
            continue

        if sources:
            if reinstall:
                to_reinstall[package_name] = version_string
                continue
            elif 'lowpkg.bin_pkg_info' not in __salt__:
                continue
            # Metadata parser is available, cache the file and derive the
            # package's name and version
            err = 'Unable to cache {0}: {1}'
            try:
                cached_path = __salt__['cp.cache_file'](version_string,
                                                        saltenv=kwargs['saltenv'])
            except CommandExecutionError as exc:
                problems.append(err.format(version_string, exc))
                continue
            if not cached_path:
                problems.append(err.format(version_string, 'file not found'))
                continue
            elif not os.path.exists(cached_path):
                problems.append('{0} does not exist on minion'.format(version_string))
                continue
            source_info = __salt__['lowpkg.bin_pkg_info'](cached_path)
            if source_info is None:
                warnings.append('Failed to parse metadata for {0}'.format(version_string))
                continue
            else:
                verstr = source_info['version']
        else:
            verstr = version_string
            if reinstall:
                to_reinstall[package_name] = version_string
                continue
            if not __salt__['pkg_resource.check_extra_requirements'](package_name, version_string):
                targets[package_name] = version_string
                continue
            # No version specified and pkg is installed
            elif __salt__['pkg_resource.version_clean'](version_string) is None:
                # Already installed with no version pin; optionally verify
                # on-disk integrity and schedule a reinstall if files differ.
                if (not reinstall) and pkg_verify:
                    try:
                        verify_result = __salt__['pkg.verify'](
                            package_name,
                            ignore_types=ignore_types,
                            verify_options=verify_options,
                            **kwargs
                        )
                    except (CommandExecutionError, SaltInvocationError) as exc:
                        failed_verify = exc.strerror
                        continue
                    if verify_result:
                        to_reinstall[package_name] = version_string
                        altered_files[package_name] = verify_result
                continue

        version_fulfilled = False
        allow_updates = bool(not sources and kwargs.get('allow_updates'))
        try:
            version_fulfilled = _fulfills_version_string(cver,
                                                         verstr,
                                                         ignore_epoch=ignore_epoch,
                                                         allow_updates=allow_updates)
        except CommandExecutionError as exc:
            problems.append(exc.strerror)
            continue

        # Compare desired version against installed version.
        version_spec = True
        if not version_fulfilled:
            if reinstall:
                to_reinstall[package_name] = version_string
            else:
                version_conditions = _parse_version_string(version_string)
                if pkg_verify and any(oper == '==' for oper, version in version_conditions):
                    try:
                        verify_result = __salt__['pkg.verify'](
                            package_name,
                            ignore_types=ignore_types,
                            verify_options=verify_options,
                            **kwargs)
                    except (CommandExecutionError, SaltInvocationError) as exc:
                        failed_verify = exc.strerror
                        continue
                    if verify_result:
                        to_reinstall[package_name] = version_string
                        altered_files[package_name] = verify_result
                else:
                    log.debug(
                        'Current version (%s) did not match desired version '
                        'specification (%s), adding to installation targets',
                        cver, version_string
                    )
                    targets[package_name] = version_string

    if failed_verify:
        problems.append(failed_verify)

    if problems:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': ' '.join(problems)}

    if not any((targets, to_unpurge, to_reinstall)):
        # All specified packages are installed
        msg = 'All specified packages are already installed{0}'
        msg = msg.format(
            ' and are at the desired version' if
            version_spec and not sources else ''
        )
        ret = {'name': name,
               'changes': {},
               'result': True,
               'comment': msg}
        if warnings:
            ret.setdefault('warnings', []).extend(warnings)
        return ret

    return (desired, targets, to_unpurge, to_reinstall, altered_files,
            warnings, was_refreshed)


def _get_desired_pkg(name, desired):
    '''
    Helper function that retrieves and nicely formats the desired pkg (and
    version if specified) so that helpful information can be printed in the
    comment for the state.

    Returns ``name``, ``name=version``, or ``name<spec>`` when the desired
    version already carries a comparison operator.
    '''
    if not desired[name] or desired[name].startswith(('<', '>', '=')):
        oper = ''
    else:
        oper = '='
    return '{0}{1}{2}'.format(name, oper,
                              '' if not desired[name] else desired[name])


def _preflight_check(desired, fromrepo, **kwargs):
    '''
    Perform platform-specific checks on desired packages

    Returns ``{'suggest': {pkgname: [alternatives]}, 'no_suggest': [pkgname]}``
    for packages missing from the package db, or ``{}`` when the provider
    does not implement ``pkg.check_db``.
    '''
    if 'pkg.check_db' not in __salt__:
        return {}
    ret = {'suggest': {}, 'no_suggest': []}
    pkginfo = __salt__['pkg.check_db'](
        *list(desired.keys()), fromrepo=fromrepo, **kwargs
    )
    for pkgname in pkginfo:
        if pkginfo[pkgname]['found'] is False:
            if pkginfo[pkgname]['suggestions']:
                ret['suggest'][pkgname] = pkginfo[pkgname]['suggestions']
            else:
                ret['no_suggest'].append(pkgname)
    return ret


def _nested_output(obj):
    '''
    Serialize obj and format for output
    '''
    # The nested outputter reads __opts__ at module level; inject ours first.
    nested.__opts__ = __opts__
    ret = nested.output(obj).rstrip()
    return ret


def _resolve_capabilities(pkgs, refresh=False, **kwargs):
    '''
    Resolve capabilities in ``pkgs`` and exchange them with real package
    names, when the result is distinct.
    This feature can be turned on while setting the parameter
    ``resolve_capabilities`` to True.

    Return the input dictionary with replaced capability names and as second
    return value a bool which says if a refresh needs to be run.

    In case of ``resolve_capabilities`` is False (disabled) or not supported
    by the implementation the input is returned unchanged.
''' if not pkgs or 'pkg.resolve_capabilities' not in __salt__: return pkgs, refresh ret = __salt__['pkg.resolve_capabilities'](pkgs, refresh=refresh, **kwargs) return ret, False def installed( name, version=None, refresh=None, fromrepo=None, skip_verify=False, skip_suggestions=False, pkgs=None, sources=None, allow_updates=False, pkg_verify=False, normalize=True, ignore_epoch=False, reinstall=False, update_holds=False, bypass_file=None, bypass_file_contains=None, **kwargs): ''' Ensure that the package is installed, and that it is the correct version (if specified). :param str name: The name of the package to be installed. This parameter is ignored if either "pkgs" or "sources" is used. Additionally, please note that this option can only be used to install packages from a software repository. To install a package file manually, use the "sources" option detailed below. :param str version: Install a specific version of a package. This option is ignored if "sources" is used. Currently, this option is supported for the following pkg providers: :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`pkgin <salt.modules.pkgin>`, :mod:`win_pkg <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`. The version number includes the release designation where applicable, to allow Salt to target a specific release of a given version. When in doubt, using the ``pkg.latest_version`` function for an uninstalled package will tell you the version available. .. code-block:: bash # salt myminion pkg.latest_version vim-enhanced myminion: 2:7.4.160-1.el7 .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the ``pkg.latest_version`` output above) must have the epoch included when specifying the version number. For example: .. 
code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. Also, while this function is not yet implemented for all pkg frontends, :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` will show all versions available in the various repositories for a given package, irrespective of whether or not it is installed. .. code-block:: bash # salt myminion pkg.list_repo_pkgs bash myminion: ---------- bash: - 4.2.46-21.el7_3 - 4.2.46-20.el7_2 This function was first added for :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` in 2014.1.0, and was expanded to :py:func:`Debian/Ubuntu <salt.modules.aptpkg.list_repo_pkgs>` and :py:func:`Arch Linux <salt.modules.pacman.list_repo_pkgs>`-based distros in the 2017.7.0 release. The version strings returned by either of these functions can be used as version specifiers in pkg states. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.installed: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 If the version given is the string ``latest``, the latest available package version will be installed à la ``pkg.latest``. **WILDCARD VERSIONS** As of the 2017.7.0 release, this state now supports wildcards in package versions for SUSE SLES/Leap/Tumbleweed, Debian/Ubuntu, RHEL/CentOS, Arch Linux, and their derivatives. Using wildcards can be useful for packages where the release name is built into the version in some way, such as for RHEL/CentOS which typically has version numbers like ``1.2.34-5.el7``. An example of the usage for this would be: .. 
code-block:: yaml mypkg: pkg.installed: - version: '1.2.34*' Keep in mind that using wildcard versions will result in a slower state run since Salt must gather the available versions of the specified packages and figure out which of them match the specified wildcard expression. :param bool refresh: This parameter controls whether or not the package repo database is updated prior to installing the requested package(s). If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before installing. If ``False``, the package database will *not* be refreshed before installing. If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - cache_valid_time: 300 - allow_updates: True - hold: False In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param str fromrepo: Specify a repository from which to install .. note:: Distros which use APT (Debian, Ubuntu, etc.) do not have a concept of repositories, in the same way as YUM-based distros do. 
When a source is added, it is assigned to a given release. Consider the following source configuration: .. code-block:: text deb http://ppa.launchpad.net/saltstack/salt/ubuntu precise main The packages provided by this source would be made available via the ``precise`` release, therefore ``fromrepo`` would need to be set to ``precise`` for Salt to install the package from this source. Having multiple sources in the same release may result in the default install candidate being newer than what is desired. If this is the case, the desired version must be specified using the ``version`` parameter. If the ``pkgs`` parameter is being used to install multiple packages in the same state, then instead of using ``version``, use the method of version specification described in the **Multiple Package Installation Options** section below. Running the shell command ``apt-cache policy pkgname`` on a minion can help elucidate the APT configuration and aid in properly configuring states: .. code-block:: bash root@saltmaster:~# salt ubuntu01 cmd.run 'apt-cache policy ffmpeg' ubuntu01: ffmpeg: Installed: (none) Candidate: 7:0.10.11-1~precise1 Version table: 7:0.10.11-1~precise1 0 500 http://ppa.launchpad.net/jon-severinsson/ffmpeg/ubuntu/ precise/main amd64 Packages 4:0.8.10-0ubuntu0.12.04.1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise-updates/main amd64 Packages 500 http://security.ubuntu.com/ubuntu/ precise-security/main amd64 Packages 4:0.8.1-0ubuntu1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise/main amd64 Packages The release is located directly after the source's URL. The actual release name is the part before the slash, so to install version **4:0.8.10-0ubuntu0.12.04.1** either ``precise-updates`` or ``precise-security`` could be used for the ``fromrepo`` value. :param bool skip_verify: Skip the GPG verification check for the package to be installed :param bool skip_suggestions: Force strict package naming. Disables lookup of package alternatives. .. 
versionadded:: 2014.1.1 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 :param bool allow_updates: Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - allow_updates: True - hold: False :param bool pkg_verify: .. versionadded:: 2014.7.0 For requested packages that are already installed and would not be targeted for upgrade or downgrade, use pkg.verify to determine if any of the files installed by the package have been altered. If files have been altered, the reinstall option of pkg.install is used to force a reinstall. Types to ignore can be passed to pkg.verify. Additionally, ``verify_options`` can be used to modify further the behavior of pkg.verify. See examples below. Currently, this option is supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>`. Examples: .. code-block:: yaml httpd: pkg.installed: - version: 2.2.15-30.el6.centos - pkg_verify: True .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc - verify_options: - nodeps - nofiledigest :param list ignore_types: List of types to ignore when verifying the package .. versionadded:: 2014.7.0 :param list verify_options: List of additional options to pass when verifying the package. 
These options will be added to the ``rpm -V`` command, prepended with ``--`` (for example, when ``nodeps`` is passed in this option, ``rpm -V`` will be run with ``--nodeps``). .. versionadded:: 2016.11.0 :param bool normalize: Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml gpfs.gplbin-2.6.32-279.31.1.el6.x86_64: pkg.installed: - normalize: False :param bool ignore_epoch: When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.installed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the package would be installed, but the state would report as failed because the actual installed version would be ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 | **MULTIPLE PACKAGE INSTALLATION OPTIONS: (not supported in pkgng)** :param list pkgs: A list of packages to install from a software repository. All packages listed under ``pkgs`` will be installed via a single command. .. 
code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar - baz - hold: True ``NOTE:`` For :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`winrepo <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`, version numbers can be specified in the ``pkgs`` argument. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz Additionally, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`zypper <salt.modules.zypper>`, :mod:`yum/dnf <salt.modules.yumpkg>`, and :mod:`apt <salt.modules.aptpkg>` support the ``<``, ``<=``, ``>=``, and ``>`` operators for more control over what versions will be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: '>=1.2.3-4' - baz ``NOTE:`` When using comparison operators, the expression must be enclosed in quotes to avoid a YAML render error. With :mod:`ebuild <salt.modules.ebuild>` is also possible to specify a use flag list and/or if the given packages should be in package.accept_keywords file and/or the overlay from which you want the package to be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo: '~' - bar: '~>=1.2:slot::overlay[use,-otheruse]' - baz :param list sources: A list of packages to install, along with the source URI or local path from which to install each package. In the example below, ``foo``, ``bar``, ``baz``, etc. refer to the name of the package, as it would appear in the output of the ``pkg.version`` or ``pkg.list_pkgs`` salt CLI commands. .. code-block:: yaml mypkgs: pkg.installed: - sources: - foo: salt://rpms/foo.rpm - bar: http://somesite.org/bar.rpm - baz: ftp://someothersite.org/baz.rpm - qux: /minion/path/to/qux.rpm **PLATFORM-SPECIFIC ARGUMENTS** These are specific to each OS. If it does not apply to the execution module for your OS, it is ignored. 
:param bool hold: Force the package to be held at the current installed version. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2014.7.0 :param bool update_holds: If ``True``, and this function would update the package version, any packages which are being held will be temporarily unheld so that they can be updated. Otherwise, if this function attempts to update a held package, the held package(s) will be skipped and the state will fail. By default, this parameter is set to ``False``. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2016.11.0 :param list names: A list of packages to install from a software repository. Each package will be installed individually by the package manager. .. warning:: Unlike ``pkgs``, the ``names`` parameter cannot specify a version. In addition, it makes a separate call to the package management frontend to install each package, whereas ``pkgs`` makes just a single call. It is therefore recommended to use ``pkgs`` instead of ``names`` to install multiple packages, both for the additional features and the performance improvement that it brings. :param bool install_recommends: Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - install_recommends: False :param bool only_upgrade: Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. 
    :param bool report_reboot_exit_codes:
        If the installer exits with a recognized exit code indicating that
        a reboot is required, the module function
        *win_system.set_reboot_required_witnessed* will be called, preserving
        the knowledge of this event for the remainder of the current boot
        session. For the time being, ``3010`` is the only recognized exit
        code, but this is subject to future refinement. The value of this
        param defaults to ``True``. This parameter has no effect on
        non-Windows systems.

        .. versionadded:: 2016.11.0

        .. code-block:: yaml

            ms vcpp installed:
              pkg.installed:
                - name: ms-vcpp
                - version: 10.0.40219
                - report_reboot_exit_codes: False

    :param str bypass_file:
        If you wish to bypass the full package validation process, you can
        specify a file related to the installed package as a way to validate
        the package has already been installed. A good example would be a
        config file that is deployed with the package. Another bypass_file
        could be ``/run/salt-minion.pid``.

        .. code-block:: yaml

            install_ntp:
              pkg.installed:
                - name: ntp
                - bypass_file: /etc/ntp.conf

        The use case for this feature is when running salt at significant
        scale. Each state that has a requisite for a ``pkg.installed`` will
        have salt querying the package manager of the system. Compared to
        simple diff checks, querying the package manager is a lengthy
        process. This feature is an attempt to reduce the run time of
        states. If only a config change is being made but you wish to keep
        all of the self-resolving requisites this bypasses the lengthy cost
        of the package manager. The assumption is that if this file is
        present, the package should already be installed.

    :param str bypass_file_contains:
        This option can only be used in conjunction with the ``bypass_file``
        option. It is to provide a second layer of validation before
        bypassing the ``pkg.installed`` process.

        ..
code-block:: yaml install_ntp: pkg.installed: - name: ntp - bypass_file: /etc/ntp.conf - bypass_file_contains: version-20181218 The will have salt check to see if the file contains the specified string. If the value is found, the ``pkg.installed`` process will be bypassed under the assumption that two pieces of validation have passed and the package is already installed. .. warning:: Do not try and use ``{{ salt['pkg.version']('ntp') }}`` in a jinja template as part of your bypass_file_contains match. This will trigger a ``pkg.version`` lookup with the pacakge manager and negate any time saved by trying to use the bypass feature. :return: A dictionary containing the state of the software installation :rtype dict: .. note:: The ``pkg.installed`` state supports the usage of ``reload_modules``. This functionality allows you to force Salt to reload all modules. In many cases, Salt is clever enough to transparently reload the modules. For example, if you install a package, Salt reloads modules because some other module or state might require the package which was installed. However, there are some edge cases where this may not be the case, which is what ``reload_modules`` is meant to resolve. You should only use ``reload_modules`` if your ``pkg.installed`` does some sort of installation where if you do not reload the modules future items in your state which rely on the software being installed will fail. Please see the :ref:`Reloading Modules <reloading-modules>` documentation for more information. ''' if not pkgs and isinstance(pkgs, list): return {'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided'} # If just a name (and optionally a version) is passed, just pack them into # the pkgs argument. 
if name and not any((pkgs, sources)): if version: pkgs = [{name: version}] version = None else: pkgs = [name] kwargs['saltenv'] = __env__ refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if bypass_file is not None and bypass_file_contains is not None: if os.path.isfile(bypass_file): with salt.utils.fopen(bypass_file) as bypass_file_open: if bypass_file_contains in bypass_file_open.read(): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as {} was present in {}'.format(bypass_file_contains, bypass_file)} if bypass_file is not None and bypass_file_contains is None: if os.path.isfile(bypass_file): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as bypass_file {} was present'.format(bypass_file)} # check if capabilities should be checked and modify the requested packages # accordingly. if pkgs: pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) if not isinstance(pkg_verify, list): pkg_verify = pkg_verify is True if (pkg_verify or isinstance(pkg_verify, list)) \ and 'pkg.verify' not in __salt__: return {'name': name, 'changes': {}, 'result': False, 'comment': 'pkg.verify not implemented'} if not isinstance(version, six.string_types) and version is not None: version = six.text_type(version) kwargs['allow_updates'] = allow_updates result = _find_install_targets(name, version, pkgs, sources, fromrepo=fromrepo, skip_suggestions=skip_suggestions, pkg_verify=pkg_verify, normalize=normalize, ignore_epoch=ignore_epoch, reinstall=reinstall, refresh=refresh, **kwargs) try: (desired, targets, to_unpurge, to_reinstall, altered_files, warnings, was_refreshed) = result if was_refreshed: refresh = False except ValueError: # _find_install_targets() found no targets or encountered an error # check that the hold function is available if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( 
name=name, pkgs=pkgs, sources=sources ) except (CommandExecutionError, SaltInvocationError) as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': six.text_type(exc)} if 'result' in hold_ret and not hold_ret['result']: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] for i in modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] result['changes'][i['name']] = i['changes'] for i in not_modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] for i in failed_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] return result if to_unpurge and 'lowpkg.unpurge' not in __salt__: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'lowpkg.unpurge not implemented'} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret # Remove any targets not returned by _find_install_targets if pkgs: pkgs = [dict([(x, y)]) for x, y in six.iteritems(targets)] pkgs.extend([dict([(x, y)]) for x, y in six.iteritems(to_reinstall)]) elif sources: oldsources = sources sources = [x for x in oldsources if next(iter(list(x.keys()))) in targets] sources.extend([x for x in oldsources if next(iter(list(x.keys()))) in to_reinstall]) comment = [] if __opts__['test']: if targets: if sources: summary = ', '.join(targets) else: summary = ', '.join([_get_desired_pkg(x, targets) for x in targets]) comment.append('The following packages would be ' 'installed/updated: {0}'.format(summary)) if to_unpurge: comment.append( 'The following packages would have their selection 
status ' 'changed from \'purge\' to \'install\': {0}' .format(', '.join(to_unpurge)) ) if to_reinstall: # Add a comment for each package in to_reinstall with its # pkg.verify output if reinstall: reinstall_targets = [] for reinstall_pkg in to_reinstall: if sources: reinstall_targets.append(reinstall_pkg) else: reinstall_targets.append( _get_desired_pkg(reinstall_pkg, to_reinstall) ) msg = 'The following packages would be reinstalled: ' msg += ', '.join(reinstall_targets) comment.append(msg) else: for reinstall_pkg in to_reinstall: if sources: pkgstr = reinstall_pkg else: pkgstr = _get_desired_pkg(reinstall_pkg, to_reinstall) comment.append( 'Package \'{0}\' would be reinstalled because the ' 'following files have been altered:'.format(pkgstr) ) comment.append( _nested_output(altered_files[reinstall_pkg]) ) ret = {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret changes = {'installed': {}} modified_hold = None not_modified_hold = None failed_hold = None if targets or to_reinstall: try: pkg_ret = __salt__['pkg.install'](name=None, refresh=refresh, version=version, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=pkgs, sources=sources, reinstall=bool(to_reinstall), normalize=normalize, update_holds=update_holds, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while installing ' 'package(s): {0}'.format(exc)) if warnings: ret.setdefault('warnings', []).extend(warnings) return ret if refresh: refresh = False if isinstance(pkg_ret, dict): changes['installed'].update(pkg_ret) elif isinstance(pkg_ret, six.string_types): comment.append(pkg_ret) # Code below will be looking for a dictionary. If this is a string # it means that there was an exception raised and that no packages # changed, so now that we have added this error to the comments we # set this to an empty dictionary so that the code below which # checks reinstall targets works. pkg_ret = {} if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( name=name, pkgs=desired ) except (CommandExecutionError, SaltInvocationError) as exc: comment.append(six.text_type(exc)) ret = {'name': name, 'changes': changes, 'result': False, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: if 'result' in hold_ret and not hold_ret['result']: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] if to_unpurge: changes['purge_desired'] = __salt__['lowpkg.unpurge'](*to_unpurge) # Analyze pkg.install results for packages in targets if sources: modified = [x for x in changes['installed'] if x in targets] not_modified = [x for x in desired if x not in targets 
and x not in to_reinstall] failed = [x for x in targets if x not in modified] else: if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True new_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__: new_caps = __salt__['pkg.list_provides'](**kwargs) else: new_caps = {} ok, failed = _verify_install(desired, new_pkgs, ignore_epoch=ignore_epoch, new_caps=new_caps) modified = [x for x in ok if x in targets] not_modified = [x for x in ok if x not in targets and x not in to_reinstall] failed = [x for x in failed if x in targets] # If there was nothing unpurged, just set the changes dict to the contents # of changes['installed']. if not changes.get('purge_desired'): changes = changes['installed'] if modified: if sources: summary = ', '.join(modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in modified]) if len(summary) < 20: comment.append('The following packages were installed/updated: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} installed/updated.'.format( len(modified), 's' if len(modified) > 1 else '', 'were' if len(modified) > 1 else 'was' ) ) if modified_hold: for i in modified_hold: change_name = i['name'] if change_name in changes: comment.append(i['comment']) if changes[change_name]['new']: changes[change_name]['new'] += '\n' changes[change_name]['new'] += '{0}'.format(i['changes']['new']) if changes[change_name]['old']: changes[change_name]['old'] += '\n' changes[change_name]['old'] += '{0}'.format(i['changes']['old']) else: comment.append(i['comment']) changes[change_name] = {} changes[change_name]['new'] = '{0}'.format(i['changes']['new']) # Any requested packages that were not targeted for install or reinstall if not_modified: if sources: summary = ', '.join(not_modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in not_modified]) if len(not_modified) <= 20: comment.append('The following 
packages were already installed: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} already installed'.format( len(not_modified), 's' if len(not_modified) > 1 else '', 'were' if len(not_modified) > 1 else 'was' ) ) if not_modified_hold: for i in not_modified_hold: comment.append(i['comment']) result = True if failed: if sources: summary = ', '.join(failed) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in failed]) comment.insert(0, 'The following packages failed to ' 'install/update: {0}'.format(summary)) result = False if failed_hold: for i in failed_hold: comment.append(i['comment']) result = False # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] # Rerun pkg.verify for packages in to_reinstall to determine failed modified = [] failed = [] for reinstall_pkg in to_reinstall: if reinstall: if reinstall_pkg in pkg_ret: modified.append(reinstall_pkg) else: failed.append(reinstall_pkg) elif pkg_verify: # No need to wrap this in a try/except because we would already # have caught invalid arguments earlier. 
def downloaded(name,
               version=None,
               pkgs=None,
               fromrepo=None,
               ignore_epoch=None,
               **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that the package is downloaded, and that it is the correct version
    (if specified). Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    :param str name:
        The name of the package to be downloaded. This parameter is ignored
        if "pkgs" is used. Can only download packages from a software
        repository.

    :param str version:
        Download a specific version of a package. For yum/dnf distros a
        nonzero epoch must be included unless ``ignore_epoch`` is set.

    :param bool resolve_capabilities:
        Turn on resolving capabilities. This allow one to name "provides" or
        alias names for packages.

        .. versionadded:: 2018.3.0

    CLI Example:

    .. code-block:: yaml

        zsh:
          pkg.downloaded:
            - version: 5.0.5-4.63
            - fromrepo: "myrepository"
    '''
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}

    # Bail out early on platforms whose pkg module cannot report downloads.
    if 'pkg.list_downloaded' not in __salt__:
        ret['result'] = False
        ret['comment'] = ('The pkg.downloaded state is not available on '
                          'this platform')
        return ret

    # An explicitly empty pkgs list means there is nothing to do.
    if isinstance(pkgs, list) and not pkgs:
        ret['result'] = True
        ret['comment'] = 'No packages to download provided'
        return ret

    # Pack a bare name (and optional version) into the pkgs-list format.
    if name and not pkgs:
        if version:
            pkgs, version = [{name: version}], None
        else:
            pkgs = [name]

    # 'downloadonly' is passed explicitly to pkg.install below, so discard
    # any caller-supplied copy to avoid a duplicate-keyword error.
    kwargs.pop('downloadonly', None)

    pkgs, _refresh = _resolve_capabilities(pkgs, **kwargs)

    # Restrict the operation to packages that are not yet downloaded.
    targets = _find_download_targets(name, version, pkgs, fromrepo=fromrepo,
                                     ignore_epoch=ignore_epoch, **kwargs)
    if isinstance(targets, dict) and 'result' in targets:
        # Helper produced a complete state return; pass it through.
        return targets
    if not isinstance(targets, dict):
        ret['result'] = False
        ret['comment'] = ('An error was encountered while checking targets: '
                          '{0}'.format(targets))
        return ret

    if __opts__['test']:
        ret['comment'] = ('The following packages would be '
                          'downloaded: {0}'.format(', '.join(targets)))
        return ret

    try:
        install_ret = __salt__['pkg.install'](name=name,
                                              pkgs=pkgs,
                                              version=version,
                                              downloadonly=True,
                                              fromrepo=fromrepo,
                                              ignore_epoch=ignore_epoch,
                                              **kwargs)
        ret['result'] = True
        ret['changes'].update(install_ret)
    except CommandExecutionError as exc:
        ret = {'name': name, 'result': False}
        if exc.info:
            # The exception carries partial changes plus a message with the
            # changes stripped out.
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = ('An error was encountered while downloading '
                              'package(s): {0}'.format(exc))
        return ret

    # Confirm that every target actually landed in the download cache.
    downloaded_pkgs = __salt__['pkg.list_downloaded'](**kwargs)
    _ok, failed = _verify_install(targets, downloaded_pkgs,
                                  ignore_epoch=ignore_epoch)

    if failed:
        summary = ', '.join([_get_desired_pkg(x, targets)
                             for x in failed])
        ret['result'] = False
        ret['comment'] = ('The following packages failed to '
                          'download: {0}'.format(summary))

    if not ret['changes'] and not ret['comment']:
        ret['result'] = True
        ret['comment'] = ('Packages are already downloaded: '
                          '{0}'.format(', '.join(targets)))

    return ret
def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that packages related to certain advisory ids are installed.

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    CLI Example:

    .. code-block:: yaml

        issue-foo-fixed:
          pkg.patch_installed:
            - advisory_ids:
              - SUSE-SLE-SERVER-12-SP2-2017-185
              - SUSE-SLE-SERVER-12-SP2-2017-150
              - SUSE-SLE-SERVER-12-SP2-2017-120
    '''
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}

    # Advisory handling requires a provider that can enumerate patches.
    if 'pkg.list_patches' not in __salt__:
        ret['result'] = False
        ret['comment'] = ('The pkg.patch_installed state is not available on '
                          'this platform')
        return ret

    # An explicitly empty list of advisories is a no-op, not an error.
    if isinstance(advisory_ids, list) and not advisory_ids:
        ret['result'] = True
        ret['comment'] = 'No advisory ids provided'
        return ret

    # Only consider advisories whose packages are not yet applied.
    targets = _find_advisory_targets(name, advisory_ids, **kwargs)
    if isinstance(targets, dict) and 'result' in targets:
        # Helper produced a complete state return; pass it through.
        return targets
    if not isinstance(targets, list):
        ret['result'] = False
        ret['comment'] = ('An error was encountered while checking targets: '
                          '{0}'.format(targets))
        return ret

    if __opts__['test']:
        ret['comment'] = ('The following advisory patches would be '
                          'downloaded: {0}'.format(', '.join(targets)))
        return ret

    try:
        install_ret = __salt__['pkg.install'](name=name,
                                              advisory_ids=advisory_ids,
                                              downloadonly=downloadonly,
                                              **kwargs)
        ret['result'] = True
        ret['changes'].update(install_ret)
    except CommandExecutionError as exc:
        ret = {'name': name, 'result': False}
        if exc.info:
            # Partial changes plus a message with the changes stripped out.
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = ('An error was encountered while downloading '
                              'package(s): {0}'.format(exc))
        return ret

    if not ret['changes'] and not ret['comment']:
        # pkg.install reported nothing to do; phrase the comment according
        # to whether we were only downloading or actually installing.
        status = 'downloaded' if downloadonly else 'installed'
        ret['result'] = True
        ret['comment'] = ('Advisory patch is not needed or related packages '
                          'are already {0}'.format(status))

    return ret
def patch_downloaded(name, advisory_ids=None, **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that packages related to certain advisory ids are downloaded.

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    CLI Example:

    .. code-block:: yaml

        preparing-to-fix-issues:
          pkg.patch_downloaded:
            - advisory_ids:
              - SUSE-SLE-SERVER-12-SP2-2017-185
              - SUSE-SLE-SERVER-12-SP2-2017-150
              - SUSE-SLE-SERVER-12-SP2-2017-120
    '''
    if 'pkg.list_patches' not in __salt__:
        return {'name': name,
                'result': False,
                'changes': {},
                'comment': 'The pkg.patch_downloaded state is not available on '
                           'this platform'}

    # 'downloadonly' is forced to True in the delegated call, so drop any
    # caller-supplied copy to avoid a duplicate-keyword error.
    kwargs.pop('downloadonly', None)

    # Download-only is just patch_installed with downloadonly=True.
    return patch_installed(name=name, advisory_ids=advisory_ids,
                           downloadonly=True, **kwargs)
def latest(
        name,
        refresh=None,
        fromrepo=None,
        skip_verify=False,
        pkgs=None,
        watch_flags=True,
        **kwargs):
    '''
    Ensure that the named package is installed and the latest available
    package. If the package can be updated, this state function will update
    the package. Generally it is better for the
    :mod:`installed <salt.states.pkg.installed>` function to be used, as
    :mod:`latest <salt.states.pkg.latest>` will update the package whenever a
    new package is available.

    name
        The name of the package to maintain at the latest available version.
        This parameter is ignored if "pkgs" is used.

    fromrepo
        Specify a repository from which to install

    skip_verify
        Skip the GPG verification check for the package to be installed

    refresh
        Whether or not to refresh the package database before checking for
        the latest available version. If unset, a refresh is performed once
        per Salt run (tracked via ``salt.utils.pkg.check_refresh``).

    pkgs
        A list of packages to maintain at the latest available version.

    watch_flags
        On Gentoo, also target packages whose USE flags have changed even
        when no newer version is available.

    :param bool resolve_capabilities:
        Turn on resolving capabilities. This allow one to name "provides" or
        alias names for packages.

        .. versionadded:: 2018.3.0
    '''
    refresh = salt.utils.pkg.check_refresh(__opts__, refresh)

    # 'sources' (local package files) cannot be version-compared against a
    # repo, so it is explicitly unsupported here.
    if kwargs.get('sources'):
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'The "sources" parameter is not supported.'}
    elif pkgs:
        desired_pkgs = list(_repack_pkgs(pkgs).keys())
        if not desired_pkgs:
            # Badly-formatted SLS
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'Invalidly formatted "pkgs" parameter. See '
                               'minion log.'}
    else:
        if not pkgs and isinstance(pkgs, list):
            # An explicitly empty pkgs list is a no-op, not an error.
            return {
                'name': name,
                'changes': {},
                'result': True,
                'comment': 'No packages to install provided'
            }
        else:
            desired_pkgs = [name]

    kwargs['saltenv'] = __env__

    # check if capabilities should be checked and modify the requested
    # packages accordingly.
    desired_pkgs, refresh = _resolve_capabilities(desired_pkgs,
                                                  refresh=refresh,
                                                  **kwargs)

    try:
        # Newest version available in the configured repos, per package.
        avail = __salt__['pkg.latest_version'](*desired_pkgs,
                                               fromrepo=fromrepo,
                                               refresh=refresh,
                                               **kwargs)
    except CommandExecutionError as exc:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'An error was encountered while checking the '
                           'newest available version of package(s): {0}'
                           .format(exc)}

    try:
        # Currently-installed version, per package.
        cur = __salt__['pkg.version'](*desired_pkgs, **kwargs)
    except CommandExecutionError as exc:
        return {'name': name, 'changes': {}, 'result': False,
                'comment': exc.strerror}

    # Repack the cur/avail data if only a single package is being checked
    if isinstance(cur, six.string_types):
        cur = {desired_pkgs[0]: cur}
    if isinstance(avail, six.string_types):
        avail = {desired_pkgs[0]: avail}

    targets = {}
    problems = []
    for pkg in desired_pkgs:
        if not avail.get(pkg):
            # Package either a) is up-to-date, or b) does not exist
            if not cur.get(pkg):
                # Package does not exist
                msg = 'No information found for \'{0}\'.'.format(pkg)
                log.error(msg)
                problems.append(msg)
            elif watch_flags \
                    and __grains__.get('os_family') == 'Gentoo' \
                    and __salt__['portage_config.is_changed_uses'](pkg):
                # Package is up-to-date, but Gentoo USE flags are changing so
                # we need to add it to the targets
                targets[pkg] = cur[pkg]
        else:
            # Package either a) is not installed, or b) is installed and has
            # an upgrade available
            targets[pkg] = avail[pkg]

    if problems:
        return {
            'name': name,
            'changes': {},
            'result': False,
            'comment': ' '.join(problems)
        }

    if targets:
        # Find up-to-date packages
        if not pkgs:
            # There couldn't have been any up-to-date packages if this state
            # only targeted a single package and is being allowed to proceed
            # to the install step.
            up_to_date = []
        else:
            up_to_date = [x for x in pkgs if x not in targets]

        if __opts__['test']:
            comments = []
            comments.append(
                'The following packages would be installed/upgraded: ' +
                ', '.join(sorted(targets))
            )
            if up_to_date:
                up_to_date_count = len(up_to_date)
                if up_to_date_count <= 10:
                    comments.append(
                        'The following packages are already up-to-date: ' +
                        ', '.join(
                            ['{0} ({1})'.format(x, cur[x])
                             for x in sorted(up_to_date)]
                        )
                    )
                else:
                    comments.append(
                        '{0} packages are already up-to-date'
                        .format(up_to_date_count)
                    )
            return {'name': name,
                    'changes': {},
                    'result': None,
                    'comment': '\n'.join(comments)}

        if salt.utils.platform.is_windows():
            # pkg.install execution module on windows ensures the software
            # package is installed when no version is specified, it does not
            # upgrade the software to the latest. This is per the design.
            # Build updated list of pkgs *with version number*, exclude
            # non-targeted ones
            targeted_pkgs = [{x: targets[x]} for x in targets]
        else:
            # Build updated list of pkgs to exclude non-targeted ones
            targeted_pkgs = list(targets)

        # No need to refresh, if a refresh was necessary it would have been
        # performed above when pkg.latest_version was run.
        try:
            changes = __salt__['pkg.install'](name=None,
                                              refresh=False,
                                              fromrepo=fromrepo,
                                              skip_verify=skip_verify,
                                              pkgs=targeted_pkgs,
                                              **kwargs)
        except CommandExecutionError as exc:
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'An error was encountered while installing '
                               'package(s): {0}'.format(exc)}

        if changes:
            # Find failed and successful updates.
            # NOTE: 'or' binds looser than 'and' here — a target fails when
            # it has no change entry, or when the new version differs from
            # the targeted one and the target was not simply 'latest'.
            failed = [x for x in targets
                      if not changes.get(x) or
                      changes[x].get('new') != targets[x] and
                      targets[x] != 'latest']
            successful = [x for x in targets if x not in failed]

            comments = []
            if failed:
                msg = 'The following packages failed to update: ' \
                      '{0}'.format(', '.join(sorted(failed)))
                comments.append(msg)
            if successful:
                msg = 'The following packages were successfully ' \
                      'installed/upgraded: ' \
                      '{0}'.format(', '.join(sorted(successful)))
                comments.append(msg)
            if up_to_date:
                if len(up_to_date) <= 10:
                    msg = 'The following packages were already up-to-date: ' \
                          '{0}'.format(', '.join(sorted(up_to_date)))
                else:
                    msg = '{0} packages were already up-to-date '.format(
                        len(up_to_date))
                comments.append(msg)

            return {'name': name,
                    'changes': changes,
                    'result': False if failed else True,
                    'comment': ' '.join(comments)}
        else:
            # pkg.install returned no changes at all: every target failed.
            if len(targets) > 10:
                comment = ('{0} targeted packages failed to update. '
                           'See debug log for details.'.format(len(targets)))
            elif len(targets) > 1:
                comment = ('The following targeted packages failed to update. '
                           'See debug log for details: ({0}).'
                           .format(', '.join(sorted(targets))))
            else:
                comment = 'Package {0} failed to ' \
                          'update.'.format(next(iter(list(targets.keys()))))
            if up_to_date:
                if len(up_to_date) <= 10:
                    comment += ' The following packages were already ' \
                        'up-to-date: ' \
                        '{0}'.format(', '.join(sorted(up_to_date)))
                else:
                    comment += '{0} packages were already ' \
                        'up-to-date'.format(len(up_to_date))

            return {'name': name,
                    'changes': changes,
                    'result': False,
                    'comment': comment}
    else:
        # Nothing to upgrade — report success.
        if len(desired_pkgs) > 10:
            comment = 'All {0} packages are up-to-date.'.format(
                len(desired_pkgs))
        elif len(desired_pkgs) > 1:
            comment = 'All packages are up-to-date ' \
                '({0}).'.format(', '.join(sorted(desired_pkgs)))
        else:
            comment = 'Package {0} is already ' \
                'up-to-date'.format(desired_pkgs[0])

        return {'name': name,
                'changes': {},
                'result': True,
                'comment': comment}
def _uninstall(
        action='remove',
        name=None,
        version=None,
        pkgs=None,
        normalize=True,
        ignore_epoch=False,
        **kwargs):
    '''
    Common function for package removal.

    Shared backend for the ``removed`` and ``purged`` states: resolves the
    requested targets, invokes ``pkg.remove``/``pkg.purge``, then verifies
    that the packages are actually gone. Returns a standard state dict.
    '''
    if action not in ('remove', 'purge'):
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'Invalid action \'{0}\'. '
                           'This is probably a bug.'.format(action)}

    try:
        # Normalize name/pkgs into a {pkgname: version} mapping.
        pkg_params = __salt__['pkg_resource.parse_targets'](
            name,
            pkgs,
            normalize=normalize)[0]
    except MinionError as exc:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'An error was encountered while parsing targets: '
                           '{0}'.format(exc)}
    targets = _find_remove_targets(name, version, pkgs, normalize,
                                   ignore_epoch=ignore_epoch, **kwargs)
    if isinstance(targets, dict) and 'result' in targets:
        # Helper produced a complete state return; pass it through.
        return targets
    elif not isinstance(targets, list):
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'An error was encountered while checking targets: '
                           '{0}'.format(targets)}
    if action == 'purge':
        # Also target packages that were removed but still have config files
        # left behind (dpkg 'rc' state and similar).
        old_removed = __salt__['pkg.list_pkgs'](versions_as_list=True,
                                                removed=True,
                                                **kwargs)
        targets.extend([x for x in pkg_params if x in old_removed])
    targets.sort()

    if not targets:
        return {'name': name,
                'changes': {},
                'result': True,
                'comment': 'None of the targeted packages are installed'
                           '{0}'.format(' or partially installed'
                                        if action == 'purge' else '')}

    if __opts__['test']:
        return {'name': name,
                'changes': {},
                'result': None,
                'comment': 'The following packages will be {0}d: '
                           '{1}.'.format(action, ', '.join(targets))}

    # Dispatch to pkg.remove or pkg.purge depending on the action.
    changes = __salt__['pkg.{0}'.format(action)](name, pkgs=pkgs,
                                                 version=version, **kwargs)
    new = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
    failed = []
    for x in pkg_params:
        if __grains__['os_family'] in ['Suse', 'RedHat']:
            # Check if the package version set to be removed is actually
            # removed — on these platforms a versioned removal may leave
            # other versions of the same package installed.
            if x in new and not pkg_params[x]:
                failed.append(x)
            elif x in new and pkg_params[x] in new[x]:
                failed.append(x + "-" + pkg_params[x])
        elif x in new:
            failed.append(x)

    if action == 'purge':
        # A package still listed among 'removed' packages was not purged.
        new_removed = __salt__['pkg.list_pkgs'](versions_as_list=True,
                                                removed=True,
                                                **kwargs)
        failed.extend([x for x in pkg_params if x in new_removed])
    failed.sort()

    if failed:
        return {'name': name,
                'changes': changes,
                'result': False,
                'comment': 'The following packages failed to {0}: '
                           '{1}.'.format(action, ', '.join(failed))}

    comments = []
    not_installed = sorted([x for x in pkg_params if x not in targets])
    if not_installed:
        comments.append('The following packages were not installed: '
                        '{0}'.format(', '.join(not_installed)))
        comments.append('The following packages were {0}d: '
                        '{1}.'.format(action, ', '.join(targets)))
    else:
        comments.append('All targeted packages were {0}d.'.format(action))

    return {'name': name,
            'changes': changes,
            'result': True,
            'comment': ' '.join(comments)}
def removed(name,
            version=None,
            pkgs=None,
            normalize=True,
            ignore_epoch=False,
            **kwargs):
    '''
    Verify that a package is not installed, calling ``pkg.remove`` if
    necessary to remove the package.

    name
        The name of the package to be removed.

    version
        The version of the package that should be removed. Don't do anything
        if the package is installed with an unmatching version. For yum/dnf
        distros a nonzero epoch must be included unless ``ignore_epoch`` is
        set.

    normalize : True
        Normalize the package name by removing the architecture, if the
        architecture of the package is different from the architecture of the
        operating system.

        .. versionadded:: 2015.8.0

    ignore_epoch : False
        Set to ``True`` to ignore the epoch when comparing versions.

        .. versionadded:: 2015.8.9

    Multiple Package Options:

    pkgs
        A list of packages to remove. Must be passed as a python list. The
        ``name`` parameter will be ignored if this option is passed. It
        accepts version numbers as well.

        .. versionadded:: 0.16.0
    '''
    kwargs['saltenv'] = __env__
    try:
        # All of the real work happens in the shared _uninstall() backend.
        return _uninstall(action='remove', name=name, version=version,
                          pkgs=pkgs, normalize=normalize,
                          ignore_epoch=ignore_epoch, **kwargs)
    except CommandExecutionError as exc:
        failure = {'name': name, 'result': False}
        if exc.info:
            # Partial changes plus a message with the changes stripped out.
            failure['changes'] = exc.info.get('changes', {})
            failure['comment'] = exc.strerror_without_changes
        else:
            failure['changes'] = {}
            failure['comment'] = ('An error was encountered while removing '
                                  'package(s): {0}'.format(exc))
        return failure
def purged(name,
           version=None,
           pkgs=None,
           normalize=True,
           ignore_epoch=False,
           **kwargs):
    '''
    Verify that a package is not installed, calling ``pkg.purge`` if
    necessary to purge the package. All configuration files are also removed.

    name
        The name of the package to be purged.

    version
        The version of the package that should be removed. Don't do anything
        if the package is installed with an unmatching version. For yum/dnf
        distros a nonzero epoch must be included unless ``ignore_epoch`` is
        set.

    normalize : True
        Normalize the package name by removing the architecture, if the
        architecture of the package is different from the architecture of the
        operating system.

        .. versionadded:: 2015.8.0

    ignore_epoch : False
        Set to ``True`` to ignore the epoch when comparing versions.

        .. versionadded:: 2015.8.9

    Multiple Package Options:

    pkgs
        A list of packages to purge. Must be passed as a python list. The
        ``name`` parameter will be ignored if this option is passed. It
        accepts version numbers as well.

        .. versionadded:: 0.16.0
    '''
    kwargs['saltenv'] = __env__
    try:
        # All of the real work happens in the shared _uninstall() backend.
        return _uninstall(action='purge', name=name, version=version,
                          pkgs=pkgs, normalize=normalize,
                          ignore_epoch=ignore_epoch, **kwargs)
    except CommandExecutionError as exc:
        failure = {'name': name, 'result': False}
        if exc.info:
            # Partial changes plus a message with the changes stripped out.
            failure['changes'] = exc.info.get('changes', {})
            failure['comment'] = exc.strerror_without_changes
        else:
            failure['changes'] = {}
            failure['comment'] = ('An error was encountered while purging '
                                  'package(s): {0}'.format(exc))
        return failure
note:: This parameter available only on Debian based distributions, and have no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 kwargs Any keyword arguments to pass through to ``pkg.upgrade``. .. versionadded:: 2015.5.0 ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'Failed to update'} if 'pkg.list_upgrades' not in __salt__: ret['comment'] = 'State pkg.uptodate is not available' return ret # emerge --update doesn't appear to support repo notation if 'fromrepo' in kwargs and __grains__['os_family'] == 'Gentoo': ret['comment'] = '\'fromrepo\' argument not supported on this platform' return ret if isinstance(refresh, bool): pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) try: packages = __salt__['pkg.list_upgrades'](refresh=refresh, **kwargs) expected = {pkgname: {'new': pkgver, 'old': __salt__['pkg.version'](pkgname, **kwargs)} for pkgname, pkgver in six.iteritems(packages)} if isinstance(pkgs, list): packages = [pkg for pkg in packages if pkg in pkgs] expected = {pkgname: pkgver for pkgname, pkgver in six.iteritems(expected) if pkgname in pkgs} except Exception as exc: ret['comment'] = six.text_type(exc) return ret else: ret['comment'] = 'refresh must be either True or False' return ret if not packages: ret['comment'] = 'System is already up-to-date' ret['result'] = True return ret elif __opts__['test']: ret['comment'] = 'System update will be performed' ret['changes'] = expected ret['result'] = None return ret try: ret['changes'] = __salt__['pkg.upgrade'](refresh=refresh, pkgs=pkgs, **kwargs) except CommandExecutionError as exc: if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while updating ' 'packages: {0}'.format(exc)) return ret # If a package list was provided, ensure those packages were updated missing = [] if isinstance(pkgs, list): missing = [pkg for pkg in six.iterkeys(expected) if pkg not in ret['changes']] if missing: ret['comment'] = 'The following package(s) failed to update: {0}'.format(', '.join(missing)) ret['result'] = False else: ret['comment'] = 'Upgrade ran successfully' ret['result'] = True return ret def group_installed(name, skip=None, include=None, **kwargs): ''' .. versionadded:: 2015.8.0 .. versionchanged:: 2016.11.0 Added support in :mod:`pacman <salt.modules.pacman>` Ensure that an entire package group is installed. This state is currently only supported for the :mod:`yum <salt.modules.yumpkg>` and :mod:`pacman <salt.modules.pacman>` package managers. skip Packages that would normally be installed by the package group ("default" packages), which should not be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - skip: - piranha include Packages which are included in a group, which would not normally be installed by a ``yum groupinstall`` ("optional" packages). Note that this will not enforce group membership; if you include packages which are not members of the specified groups, they will still be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - include: - haproxy .. versionchanged:: 2016.3.0 This option can no longer be passed as a comma-separated list, it must now be passed as a list (as shown in the above example). .. note:: Because this is essentially a wrapper around :py:func:`pkg.install <salt.modules.yumpkg.install>`, any argument which can be passed to pkg.install may also be included here, and it will be passed on to the call to :py:func:`pkg.install <salt.modules.yumpkg.install>`. 
''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} if 'pkg.group_diff' not in __salt__: ret['comment'] = 'pkg.group_install not available for this platform' return ret if skip is None: skip = [] else: if not isinstance(skip, list): ret['comment'] = 'skip must be formatted as a list' return ret for idx, item in enumerate(skip): if not isinstance(item, six.string_types): skip[idx] = six.text_type(item) if include is None: include = [] else: if not isinstance(include, list): ret['comment'] = 'include must be formatted as a list' return ret for idx, item in enumerate(include): if not isinstance(item, six.string_types): include[idx] = six.text_type(item) try: diff = __salt__['pkg.group_diff'](name) except CommandExecutionError as err: ret['comment'] = ('An error was encountered while installing/updating ' 'group \'{0}\': {1}.'.format(name, err)) return ret mandatory = diff['mandatory']['installed'] + \ diff['mandatory']['not installed'] invalid_skip = [x for x in mandatory if x in skip] if invalid_skip: ret['comment'] = ( 'The following mandatory packages cannot be skipped: {0}' .format(', '.join(invalid_skip)) ) return ret targets = diff['mandatory']['not installed'] targets.extend([x for x in diff['default']['not installed'] if x not in skip]) targets.extend(include) if not targets: ret['result'] = True ret['comment'] = 'Group \'{0}\' is already installed'.format(name) return ret partially_installed = diff['mandatory']['installed'] \ or diff['default']['installed'] \ or diff['optional']['installed'] if __opts__['test']: ret['result'] = None if partially_installed: ret['comment'] = ( 'Group \'{0}\' is partially installed and will be updated' .format(name) ) else: ret['comment'] = 'Group \'{0}\' will be installed'.format(name) return ret try: ret['changes'] = __salt__['pkg.install'](pkgs=targets, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while ' 'installing/updating group \'{0}\': {1}' .format(name, exc)) return ret failed = [x for x in targets if x not in __salt__['pkg.list_pkgs'](**kwargs)] if failed: ret['comment'] = ( 'Failed to install the following packages: {0}' .format(', '.join(failed)) ) return ret ret['result'] = True ret['comment'] = 'Group \'{0}\' was {1}'.format( name, 'updated' if partially_installed else 'installed' ) return ret def mod_init(low): ''' Set a flag to tell the install functions to refresh the package database. This ensures that the package database is refreshed only once during a state run significantly improving the speed of package management during a state run. It sets a flag for a number of reasons, primarily due to timeline logic. When originally setting up the mod_init for pkg a number of corner cases arose with different package managers and how they refresh package data. It also runs the "ex_mod_init" from the package manager module that is currently loaded. The "ex_mod_init" is expected to work as a normal "mod_init" function. .. 
seealso:: :py:func:`salt.modules.ebuild.ex_mod_init` ''' ret = True if 'pkg.ex_mod_init' in __salt__: ret = __salt__['pkg.ex_mod_init'](low) if low['fun'] == 'installed' or low['fun'] == 'latest': salt.utils.pkg.write_rtag(__opts__) return ret return False def mod_aggregate(low, chunks, running): ''' The mod_aggregate function which looks up all packages in the available low chunks and merges them into a single pkgs ref in the present low data ''' pkgs = [] pkg_type = None agg_enabled = [ 'installed', 'latest', 'removed', 'purged', ] if low.get('fun') not in agg_enabled: return low for chunk in chunks: tag = __utils__['state.gen_tag'](chunk) if tag in running: # Already ran the pkg state, skip aggregation continue if chunk.get('state') == 'pkg': if '__agg__' in chunk: continue # Check for the same function if chunk.get('fun') != low.get('fun'): continue # Check for the same repo if chunk.get('fromrepo') != low.get('fromrepo'): continue # Check first if 'sources' was passed so we don't aggregate pkgs # and sources together. if 'sources' in chunk: if pkg_type is None: pkg_type = 'sources' if pkg_type == 'sources': pkgs.extend(chunk['sources']) chunk['__agg__'] = True else: if pkg_type is None: pkg_type = 'pkgs' if pkg_type == 'pkgs': # Pull out the pkg names! if 'pkgs' in chunk: pkgs.extend(chunk['pkgs']) chunk['__agg__'] = True elif 'name' in chunk: version = chunk.pop('version', None) if version is not None: pkgs.append({chunk['name']: version}) else: pkgs.append(chunk['name']) chunk['__agg__'] = True if pkg_type is not None and pkgs: if pkg_type in low: low[pkg_type].extend(pkgs) else: low[pkg_type] = pkgs return low def mod_watch(name, **kwargs): ''' Install/reinstall a package based on a watch requisite .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered. 
''' sfun = kwargs.pop('sfun', None) mapfun = {'purged': purged, 'latest': latest, 'removed': removed, 'installed': installed} if sfun in mapfun: return mapfun[sfun](name, **kwargs) return {'name': name, 'changes': {}, 'comment': 'pkg.{0} does not work with the watch requisite'.format(sfun), 'result': False}
saltstack/salt
salt/states/pkg.py
_get_desired_pkg
python
def _get_desired_pkg(name, desired): ''' Helper function that retrieves and nicely formats the desired pkg (and version if specified) so that helpful information can be printed in the comment for the state. ''' if not desired[name] or desired[name].startswith(('<', '>', '=')): oper = '' else: oper = '=' return '{0}{1}{2}'.format(name, oper, '' if not desired[name] else desired[name])
Helper function that retrieves and nicely formats the desired pkg (and version if specified) so that helpful information can be printed in the comment for the state.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pkg.py#L887-L898
null
# -*- coding: utf-8 -*- ''' Installation of packages using OS package managers such as yum or apt-get ========================================================================= .. note:: On minions running systemd>=205, as of version 2015.8.12, 2016.3.3, and 2016.11.0, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing the package manager commands spawned by Salt, when Salt updates itself (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.use_scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Salt can manage software packages via the pkg state module, packages can be set up to be installed, latest, removed and purged. Package management declarations are typically rather simple: .. code-block:: yaml vim: pkg.installed A more involved example involves pulling from a custom repository. .. code-block:: yaml base: pkgrepo.managed: - humanname: Logstash PPA - name: ppa:wolfnet/logstash - dist: precise - file: /etc/apt/sources.list.d/logstash.list - keyid: 28B04E4A - keyserver: keyserver.ubuntu.com logstash: pkg.installed: - fromrepo: ppa:wolfnet/logstash Multiple packages can also be installed with the use of the pkgs state module .. code-block:: yaml dotdeb.repo: pkgrepo.managed: - humanname: Dotdeb - name: deb http://packages.dotdeb.org wheezy-php55 all - dist: wheezy-php55 - file: /etc/apt/sources.list.d/dotbeb.list - keyid: 89DF5277 - keyserver: keys.gnupg.net - refresh_db: true php.packages: pkg.installed: - fromrepo: wheezy-php55 - pkgs: - php5-fpm - php5-cli - php5-curl .. 
warning:: Package names are currently case-sensitive. If the minion is using a package manager which is not case-sensitive (such as :mod:`pkgng <salt.modules.pkgng>`), then this state will fail if the proper case is not used. This will be addressed in a future release of Salt. ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import fnmatch import logging import os import re # Import Salt libs import salt.utils.pkg import salt.utils.platform import salt.utils.versions from salt.output import nested from salt.utils.functools import namespaced_function as _namespaced_function from salt.utils.odict import OrderedDict as _OrderedDict from salt.exceptions import ( CommandExecutionError, MinionError, SaltInvocationError ) from salt.modules.pkg_resource import _repack_pkgs # Import 3rd-party libs from salt.ext import six # pylint: disable=invalid-name _repack_pkgs = _namespaced_function(_repack_pkgs, globals()) if salt.utils.platform.is_windows(): # pylint: disable=import-error,no-name-in-module,unused-import from salt.ext.six.moves.urllib.parse import urlparse as _urlparse from salt.exceptions import SaltRenderError import collections import datetime import errno import time from functools import cmp_to_key # pylint: disable=import-error # pylint: enable=unused-import from salt.modules.win_pkg import _get_package_info from salt.modules.win_pkg import get_repo_data from salt.modules.win_pkg import _get_repo_details from salt.modules.win_pkg import _refresh_db_conditional from salt.modules.win_pkg import refresh_db from salt.modules.win_pkg import genrepo from salt.modules.win_pkg import _repo_process_pkg_sls from salt.modules.win_pkg import _get_latest_pkg_version from salt.modules.win_pkg import _reverse_cmp_pkg_versions _get_package_info = _namespaced_function(_get_package_info, globals()) get_repo_data = _namespaced_function(get_repo_data, globals()) _get_repo_details = \ _namespaced_function(_get_repo_details, globals()) 
_refresh_db_conditional = \ _namespaced_function(_refresh_db_conditional, globals()) refresh_db = _namespaced_function(refresh_db, globals()) genrepo = _namespaced_function(genrepo, globals()) _repo_process_pkg_sls = \ _namespaced_function(_repo_process_pkg_sls, globals()) _get_latest_pkg_version = \ _namespaced_function(_get_latest_pkg_version, globals()) _reverse_cmp_pkg_versions = \ _namespaced_function(_reverse_cmp_pkg_versions, globals()) # The following imports are used by the namespaced win_pkg funcs # and need to be included in their globals. # pylint: disable=import-error,unused-import from salt.utils.versions import LooseVersion # pylint: enable=import-error,unused-import # pylint: enable=invalid-name log = logging.getLogger(__name__) def __virtual__(): ''' Only make these states available if a pkg provider has been detected or assigned for this minion ''' return 'pkg.install' in __salt__ def _get_comparison_spec(pkgver): ''' Return a tuple containing the comparison operator and the version. If no comparison operator was passed, the comparison is assumed to be an "equals" comparison, and "==" will be the operator returned. ''' oper, verstr = salt.utils.pkg.split_comparison(pkgver.strip()) if oper in ('=', ''): oper = '==' return oper, verstr def _parse_version_string(version_conditions_string): ''' Returns a list of two-tuples containing (operator, version). ''' result = [] version_conditions_string = version_conditions_string.strip() if not version_conditions_string: return result for version_condition in version_conditions_string.split(','): operator_and_version = _get_comparison_spec(version_condition) result.append(operator_and_version) return result def _fulfills_version_string(installed_versions, version_conditions_string, ignore_epoch=False, allow_updates=False): ''' Returns True if any of the installed versions match the specified version conditions, otherwise returns False. 
installed_versions The installed versions version_conditions_string The string containing all version conditions. E.G. 1.2.3-4 >=1.2.3-4 >=1.2.3-4, <2.3.4-5 >=1.2.3-4, <2.3.4-5, !=1.2.4-1 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. allow_updates : False Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. (Only applicable if only one strict version condition is specified E.G. version: 2.0.6~ubuntu3) ''' version_conditions = _parse_version_string(version_conditions_string) for installed_version in installed_versions: fullfills_all = True for operator, version_string in version_conditions: if allow_updates and len(version_conditions) == 1 and operator == '==': operator = '>=' fullfills_all = fullfills_all and _fulfills_version_spec([installed_version], operator, version_string, ignore_epoch=ignore_epoch) if fullfills_all: return True return False def _fulfills_version_spec(versions, oper, desired_version, ignore_epoch=False): ''' Returns True if any of the installed versions match the specified version, otherwise returns False ''' cmp_func = __salt__.get('pkg.version_cmp') # stripping "with_origin" dict wrapper if salt.utils.platform.is_freebsd(): if isinstance(versions, dict) and 'version' in versions: versions = versions['version'] for ver in versions: if (oper == '==' and fnmatch.fnmatch(ver, desired_version)) \ or salt.utils.versions.compare(ver1=ver, oper=oper, ver2=desired_version, cmp_func=cmp_func, ignore_epoch=ignore_epoch): return True return False def _find_unpurge_targets(desired, **kwargs): ''' Find packages which are marked to be purged but can't yet be removed because they are dependencies for 
other installed packages. These are the packages which will need to be 'unpurged' because they are part of pkg.installed states. This really just applies to Debian-based Linuxes. ''' return [ x for x in desired if x in __salt__['pkg.list_pkgs'](purge_desired=True, **kwargs) ] def _find_download_targets(name=None, version=None, pkgs=None, normalize=True, skip_suggestions=False, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.downloaded and discover what packages need to be downloaded. Return a dict of packages to download. ''' cur_pkgs = __salt__['pkg.list_downloaded'](**kwargs) if pkgs: to_download = _repack_pkgs(pkgs, normalize=normalize) if not to_download: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. See ' 'minion log.'} else: if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_download = {_normalize_name(name): version} else: to_download = {name: version} cver = cur_pkgs.get(name, {}) if name in to_download: # Package already downloaded, no need to download again if cver and version in cver: return {'name': name, 'changes': {}, 'result': True, 'comment': 'Version {0} of package \'{1}\' is already ' 'downloaded'.format(version, name)} # if cver is not an empty string, the package is already downloaded elif cver and version is None: # The package is downloaded return {'name': name, 'changes': {}, 'result': True, 'comment': 'Package {0} is already ' 'downloaded'.format(name)} version_spec = False if not skip_suggestions: try: problems = _preflight_check(to_download, **kwargs) except CommandExecutionError: pass else: comments = [] if problems.get('no_suggest'): comments.append( 'The following package(s) were not found, and no ' 'possible matches were found in the package db: ' '{0}'.format( ', '.join(sorted(problems['no_suggest'])) ) ) if problems.get('suggest'): for pkgname, suggestions in \ six.iteritems(problems['suggest']): 
comments.append( 'Package \'{0}\' not found (possible matches: ' '{1})'.format(pkgname, ', '.join(suggestions)) ) if comments: if len(comments) > 1: comments.append('') return {'name': name, 'changes': {}, 'result': False, 'comment': '. '.join(comments).rstrip()} # Find out which packages will be targeted in the call to pkg.download # Check current downloaded versions against specified versions targets = {} problems = [] for pkgname, pkgver in six.iteritems(to_download): cver = cur_pkgs.get(pkgname, {}) # Package not yet downloaded, so add to targets if not cver: targets[pkgname] = pkgver continue # No version specified but package is already downloaded elif cver and not pkgver: continue version_spec = True try: if not _fulfills_version_string(cver.keys(), pkgver, ignore_epoch=ignore_epoch): targets[pkgname] = pkgver except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already downloaded msg = ( 'All specified packages{0} are already downloaded' .format(' (matching specified versions)' if version_spec else '') ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_advisory_targets(name=None, advisory_ids=None, **kwargs): ''' Inspect the arguments to pkg.patch_installed and discover what advisory patches need to be installed. Return a dict of advisory patches to install. 
''' cur_patches = __salt__['pkg.list_installed_patches'](**kwargs) if advisory_ids: to_download = advisory_ids else: to_download = [name] if cur_patches.get(name, {}): # Advisory patch already installed, no need to install it again return {'name': name, 'changes': {}, 'result': True, 'comment': 'Advisory patch {0} is already ' 'installed'.format(name)} # Find out which advisory patches will be targeted in the call to pkg.install targets = [] for patch_name in to_download: cver = cur_patches.get(patch_name, {}) # Advisory patch not yet installed, so add to targets if not cver: targets.append(patch_name) continue if not targets: # All specified packages are already downloaded msg = ('All specified advisory patches are already installed') return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_remove_targets(name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.removed and discover what packages need to be removed. Return a dict of packages to remove. ''' if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if pkgs: to_remove = _repack_pkgs(pkgs, normalize=normalize) if not to_remove: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. 
See ' 'minion log.'} else: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_remove = {_normalize_name(name): version} version_spec = False # Find out which packages will be targeted in the call to pkg.remove # Check current versions against specified versions targets = [] problems = [] for pkgname, pkgver in six.iteritems(to_remove): # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names origin = bool(re.search('/', pkgname)) if __grains__['os'] == 'FreeBSD' and origin: cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == pkgname] else: cver = cur_pkgs.get(pkgname, []) # Package not installed, no need to remove if not cver: continue # No version specified and pkg is installed elif __salt__['pkg_resource.version_clean'](pkgver) is None: targets.append(pkgname) continue version_spec = True try: if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch): targets.append(pkgname) else: log.debug( 'Current version (%s) did not match desired version ' 'specification (%s), will not remove', cver, pkgver ) except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already absent msg = 'All specified packages{0} are already absent'.format( ' (matching specified versions)' if version_spec else '' ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_install_targets(name=None, version=None, pkgs=None, sources=None, skip_suggestions=False, pkg_verify=False, normalize=True, ignore_epoch=False, reinstall=False, refresh=False, **kwargs): ''' Inspect the arguments to pkg.installed and discover what packages need to be installed. 
Return a dict of desired packages ''' was_refreshed = False if all((pkgs, sources)): return {'name': name, 'changes': {}, 'result': False, 'comment': 'Only one of "pkgs" and "sources" is permitted.'} # dict for packages that fail pkg.verify and their altered files altered_files = {} # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True if salt.utils.platform.is_windows(): # Windows requires a refresh to establish a pkg db if refresh=True, so # add it to the kwargs. 
kwargs['refresh'] = refresh resolve_capabilities = kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__ try: cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) cur_prov = resolve_capabilities and __salt__['pkg.list_provides'](**kwargs) or dict() except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': exc.strerror} if salt.utils.platform.is_windows() and kwargs.pop('refresh', False): # We already refreshed when we called pkg.list_pkgs was_refreshed = True refresh = False if any((pkgs, sources)): if pkgs: desired = _repack_pkgs(pkgs, normalize=normalize) elif sources: desired = __salt__['pkg_resource.pack_sources']( sources, normalize=normalize, ) if not desired: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted \'{0}\' parameter. See ' 'minion log.'.format('pkgs' if pkgs else 'sources')} to_unpurge = _find_unpurge_targets(desired, **kwargs) else: if salt.utils.platform.is_windows(): pkginfo = _get_package_info(name, saltenv=kwargs['saltenv']) if not pkginfo: return {'name': name, 'changes': {}, 'result': False, 'comment': 'Package {0} not found in the ' 'repository.'.format(name)} if version is None: version = _get_latest_pkg_version(pkginfo) if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) desired = {_normalize_name(name): version} else: desired = {name: version} to_unpurge = _find_unpurge_targets(desired, **kwargs) # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names origin = bool(re.search('/', name)) if __grains__['os'] == 'FreeBSD' and origin: cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == name] else: cver = cur_pkgs.get(name, []) if name not in to_unpurge: if version and version in cver \ and not reinstall \ and not pkg_verify: # The package is installed and is the correct version return {'name': name, 'changes': {}, 'result': True, 
'comment': 'Version {0} of package \'{1}\' is already ' 'installed'.format(version, name)} # if cver is not an empty string, the package is already installed elif cver and version is None \ and not reinstall \ and not pkg_verify: # The package is installed return {'name': name, 'changes': {}, 'result': True, 'comment': 'Package {0} is already ' 'installed'.format(name)} version_spec = False if not sources: # Check for alternate package names if strict processing is not # enforced. Takes extra time. Disable for improved performance if not skip_suggestions: # Perform platform-specific pre-flight checks not_installed = dict([ (name, version) for name, version in desired.items() if not (name in cur_pkgs and (version is None or _fulfills_version_string(cur_pkgs[name], version))) ]) if not_installed: try: problems = _preflight_check(not_installed, **kwargs) except CommandExecutionError: pass else: comments = [] if problems.get('no_suggest'): comments.append( 'The following package(s) were not found, and no ' 'possible matches were found in the package db: ' '{0}'.format( ', '.join(sorted(problems['no_suggest'])) ) ) if problems.get('suggest'): for pkgname, suggestions in \ six.iteritems(problems['suggest']): comments.append( 'Package \'{0}\' not found (possible matches: ' '{1})'.format(pkgname, ', '.join(suggestions)) ) if comments: if len(comments) > 1: comments.append('') return {'name': name, 'changes': {}, 'result': False, 'comment': '. '.join(comments).rstrip()} # Resolve the latest package version for any packages with "latest" in the # package version wants_latest = [] \ if sources \ else [x for x, y in six.iteritems(desired) if y == 'latest'] if wants_latest: resolved_latest = __salt__['pkg.latest_version'](*wants_latest, refresh=refresh, **kwargs) if len(wants_latest) == 1: resolved_latest = {wants_latest[0]: resolved_latest} if refresh: was_refreshed = True refresh = False # pkg.latest_version returns an empty string when the package is # up-to-date. 
So check the currently-installed packages. If found, the # resolved latest version will be the currently installed one from # cur_pkgs. If not found, then the package doesn't exist and the # resolved latest version will be None. for key in resolved_latest: if not resolved_latest[key]: if key in cur_pkgs: resolved_latest[key] = cur_pkgs[key][-1] else: resolved_latest[key] = None # Update the desired versions with the ones we resolved desired.update(resolved_latest) # Find out which packages will be targeted in the call to pkg.install targets = {} to_reinstall = {} problems = [] warnings = [] failed_verify = False for package_name, version_string in six.iteritems(desired): cver = cur_pkgs.get(package_name, []) if resolve_capabilities and not cver and package_name in cur_prov: cver = cur_pkgs.get(cur_prov.get(package_name)[0], []) # Package not yet installed, so add to targets if not cver: targets[package_name] = version_string continue if sources: if reinstall: to_reinstall[package_name] = version_string continue elif 'lowpkg.bin_pkg_info' not in __salt__: continue # Metadata parser is available, cache the file and derive the # package's name and version err = 'Unable to cache {0}: {1}' try: cached_path = __salt__['cp.cache_file'](version_string, saltenv=kwargs['saltenv']) except CommandExecutionError as exc: problems.append(err.format(version_string, exc)) continue if not cached_path: problems.append(err.format(version_string, 'file not found')) continue elif not os.path.exists(cached_path): problems.append('{0} does not exist on minion'.format(version_string)) continue source_info = __salt__['lowpkg.bin_pkg_info'](cached_path) if source_info is None: warnings.append('Failed to parse metadata for {0}'.format(version_string)) continue else: verstr = source_info['version'] else: verstr = version_string if reinstall: to_reinstall[package_name] = version_string continue if not __salt__['pkg_resource.check_extra_requirements'](package_name, version_string): 
targets[package_name] = version_string continue # No version specified and pkg is installed elif __salt__['pkg_resource.version_clean'](version_string) is None: if (not reinstall) and pkg_verify: try: verify_result = __salt__['pkg.verify']( package_name, ignore_types=ignore_types, verify_options=verify_options, **kwargs ) except (CommandExecutionError, SaltInvocationError) as exc: failed_verify = exc.strerror continue if verify_result: to_reinstall[package_name] = version_string altered_files[package_name] = verify_result continue version_fulfilled = False allow_updates = bool(not sources and kwargs.get('allow_updates')) try: version_fulfilled = _fulfills_version_string(cver, verstr, ignore_epoch=ignore_epoch, allow_updates=allow_updates) except CommandExecutionError as exc: problems.append(exc.strerror) continue # Compare desired version against installed version. version_spec = True if not version_fulfilled: if reinstall: to_reinstall[package_name] = version_string else: version_conditions = _parse_version_string(version_string) if pkg_verify and any(oper == '==' for oper, version in version_conditions): try: verify_result = __salt__['pkg.verify']( package_name, ignore_types=ignore_types, verify_options=verify_options, **kwargs ) except (CommandExecutionError, SaltInvocationError) as exc: failed_verify = exc.strerror continue if verify_result: to_reinstall[package_name] = version_string altered_files[package_name] = verify_result else: log.debug( 'Current version (%s) did not match desired version ' 'specification (%s), adding to installation targets', cver, version_string ) targets[package_name] = version_string if failed_verify: problems.append(failed_verify) if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not any((targets, to_unpurge, to_reinstall)): # All specified packages are installed msg = 'All specified packages are already installed{0}' msg = msg.format( ' and are at the desired version' if 
            version_spec and not sources else ''
        )
        ret = {'name': name,
               'changes': {},
               'result': True,
               'comment': msg}
        if warnings:
            ret.setdefault('warnings', []).extend(warnings)
        return ret

    return (desired, targets, to_unpurge, to_reinstall, altered_files,
            warnings, was_refreshed)


def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None):
    '''
    Determine whether or not the installed packages match what was requested
    in the SLS file.

    desired
        dict mapping package name -> requested version string (or None)
    new_pkgs
        dict of currently installed packages, as returned by pkg.list_pkgs
        with versions_as_list=True (values are lists of version strings)
    ignore_epoch
        disregard the RPM-style epoch prefix when comparing versions
    new_caps
        optional mapping of capability name -> [real package name, ...]

    Returns a 2-tuple ``(ok, failed)`` of package-name lists.
    '''
    ok = []
    failed = []
    if not new_caps:
        new_caps = dict()
    for pkgname, pkgver in desired.items():
        # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names.
        # Homebrew for Mac OSX does something similar with tap names
        # prefixing package names, separated with a slash.
        has_origin = '/' in pkgname

        if __grains__['os'] == 'FreeBSD' and has_origin:
            # Match on the port origin recorded in the pkg metadata
            cver = [k for k, v in six.iteritems(new_pkgs) if v['origin'] == pkgname]
        elif __grains__['os'] == 'MacOS' and has_origin:
            # Fall back to the bare formula name after the tap prefix
            cver = new_pkgs.get(pkgname, new_pkgs.get(pkgname.split('/')[-1]))
        elif __grains__['os'] == 'OpenBSD':
            # OpenBSD flavors are appended with '%'; strip them for lookup
            cver = new_pkgs.get(pkgname.split('%')[0])
        elif __grains__['os_family'] == 'Debian':
            # 'name=version' pinning syntax: look up by bare name
            cver = new_pkgs.get(pkgname.split('=')[0])
        else:
            cver = new_pkgs.get(pkgname)

        # Not found by name: try resolving through the capability map
        if not cver and pkgname in new_caps:
            cver = new_pkgs.get(new_caps.get(pkgname)[0])

        if not cver:
            # Package (or capability) not installed at all
            failed.append(pkgname)
            continue
        elif pkgver == 'latest':
            # 'latest' was already resolved/installed by the caller
            ok.append(pkgname)
            continue
        elif not __salt__['pkg_resource.version_clean'](pkgver):
            # No real version requested (None/empty after cleaning): any
            # installed version satisfies the request
            ok.append(pkgname)
            continue
        elif pkgver.endswith("*") and cver[0].startswith(pkgver[:-1]):
            # Trailing-wildcard version: prefix match is sufficient
            ok.append(pkgname)
            continue
        if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch):
            ok.append(pkgname)
        else:
            failed.append(pkgname)
    return ok, failed


def _preflight_check(desired, fromrepo, **kwargs):
    '''
    Perform platform-specific checks on desired packages via pkg.check_db.

    Returns a dict with two keys:
      - 'suggest': {pkgname: [suggested alternative names]}
      - 'no_suggest': [pkgnames not found with no alternatives]
    Returns an empty dict when the pkg provider has no check_db function.
    '''
    if 'pkg.check_db' not in __salt__:
        return {}
    ret = {'suggest': {}, 'no_suggest': []}
    pkginfo = __salt__['pkg.check_db'](
        *list(desired.keys()), fromrepo=fromrepo, **kwargs
    )
    for pkgname in pkginfo:
        if pkginfo[pkgname]['found'] is False:
            if pkginfo[pkgname]['suggestions']:
                ret['suggest'][pkgname] = pkginfo[pkgname]['suggestions']
            else:
                ret['no_suggest'].append(pkgname)
    return ret


def _nested_output(obj):
    '''
    Serialize obj with the 'nested' outputter and return the formatted
    string (trailing whitespace stripped) for inclusion in state comments.
    '''
    # The nested outputter reads options from a module-level attribute,
    # so it must be injected before calling output().
    nested.__opts__ = __opts__
    ret = nested.output(obj).rstrip()
    return ret


def _resolve_capabilities(pkgs, refresh=False, **kwargs):
    '''
    Resolve capabilities in ``pkgs`` and exchange them with real package
    names, when the result is distinct.
    This feature can be turned on while setting the parameter
    ``resolve_capabilities`` to True.

    Return the input dictionary with replaced capability names and as
    second return value a bool which says if a refresh needs to be run.

    In case ``resolve_capabilities`` is False (disabled) or not
    supported by the implementation the input is returned unchanged.
    '''
    if not pkgs or 'pkg.resolve_capabilities' not in __salt__:
        return pkgs, refresh

    # pkg.resolve_capabilities performs the refresh itself when asked,
    # so the caller must not refresh again afterwards (hence False).
    ret = __salt__['pkg.resolve_capabilities'](pkgs, refresh=refresh, **kwargs)
    return ret, False


def installed(
        name,
        version=None,
        refresh=None,
        fromrepo=None,
        skip_verify=False,
        skip_suggestions=False,
        pkgs=None,
        sources=None,
        allow_updates=False,
        pkg_verify=False,
        normalize=True,
        ignore_epoch=False,
        reinstall=False,
        update_holds=False,
        bypass_file=None,
        bypass_file_contains=None,
        **kwargs):
    '''
    Ensure that the package is installed, and that it is the correct version
    (if specified).

    :param str name:
        The name of the package to be installed. This parameter is ignored if
        either "pkgs" or "sources" is used. Additionally, please note that
        this option can only be used to install packages from a software
        repository. To install a package file manually, use the "sources"
        option detailed below.

    :param str version:
        Install a specific version of a package. This option is ignored if
        "sources" is used.
Currently, this option is supported for the following pkg providers: :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`pkgin <salt.modules.pkgin>`, :mod:`win_pkg <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`. The version number includes the release designation where applicable, to allow Salt to target a specific release of a given version. When in doubt, using the ``pkg.latest_version`` function for an uninstalled package will tell you the version available. .. code-block:: bash # salt myminion pkg.latest_version vim-enhanced myminion: 2:7.4.160-1.el7 .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the ``pkg.latest_version`` output above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. Also, while this function is not yet implemented for all pkg frontends, :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` will show all versions available in the various repositories for a given package, irrespective of whether or not it is installed. .. 
code-block:: bash # salt myminion pkg.list_repo_pkgs bash myminion: ---------- bash: - 4.2.46-21.el7_3 - 4.2.46-20.el7_2 This function was first added for :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` in 2014.1.0, and was expanded to :py:func:`Debian/Ubuntu <salt.modules.aptpkg.list_repo_pkgs>` and :py:func:`Arch Linux <salt.modules.pacman.list_repo_pkgs>`-based distros in the 2017.7.0 release. The version strings returned by either of these functions can be used as version specifiers in pkg states. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.installed: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 If the version given is the string ``latest``, the latest available package version will be installed à la ``pkg.latest``. **WILDCARD VERSIONS** As of the 2017.7.0 release, this state now supports wildcards in package versions for SUSE SLES/Leap/Tumbleweed, Debian/Ubuntu, RHEL/CentOS, Arch Linux, and their derivatives. Using wildcards can be useful for packages where the release name is built into the version in some way, such as for RHEL/CentOS which typically has version numbers like ``1.2.34-5.el7``. An example of the usage for this would be: .. code-block:: yaml mypkg: pkg.installed: - version: '1.2.34*' Keep in mind that using wildcard versions will result in a slower state run since Salt must gather the available versions of the specified packages and figure out which of them match the specified wildcard expression. :param bool refresh: This parameter controls whether or not the package repo database is updated prior to installing the requested package(s). If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before installing. If ``False``, the package database will *not* be refreshed before installing. 
If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - cache_valid_time: 300 - allow_updates: True - hold: False In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param str fromrepo: Specify a repository from which to install .. note:: Distros which use APT (Debian, Ubuntu, etc.) do not have a concept of repositories, in the same way as YUM-based distros do. When a source is added, it is assigned to a given release. Consider the following source configuration: .. code-block:: text deb http://ppa.launchpad.net/saltstack/salt/ubuntu precise main The packages provided by this source would be made available via the ``precise`` release, therefore ``fromrepo`` would need to be set to ``precise`` for Salt to install the package from this source. Having multiple sources in the same release may result in the default install candidate being newer than what is desired. If this is the case, the desired version must be specified using the ``version`` parameter. 
If the ``pkgs`` parameter is being used to install multiple packages in the same state, then instead of using ``version``, use the method of version specification described in the **Multiple Package Installation Options** section below. Running the shell command ``apt-cache policy pkgname`` on a minion can help elucidate the APT configuration and aid in properly configuring states: .. code-block:: bash root@saltmaster:~# salt ubuntu01 cmd.run 'apt-cache policy ffmpeg' ubuntu01: ffmpeg: Installed: (none) Candidate: 7:0.10.11-1~precise1 Version table: 7:0.10.11-1~precise1 0 500 http://ppa.launchpad.net/jon-severinsson/ffmpeg/ubuntu/ precise/main amd64 Packages 4:0.8.10-0ubuntu0.12.04.1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise-updates/main amd64 Packages 500 http://security.ubuntu.com/ubuntu/ precise-security/main amd64 Packages 4:0.8.1-0ubuntu1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise/main amd64 Packages The release is located directly after the source's URL. The actual release name is the part before the slash, so to install version **4:0.8.10-0ubuntu0.12.04.1** either ``precise-updates`` or ``precise-security`` could be used for the ``fromrepo`` value. :param bool skip_verify: Skip the GPG verification check for the package to be installed :param bool skip_suggestions: Force strict package naming. Disables lookup of package alternatives. .. versionadded:: 2014.1.1 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 :param bool allow_updates: Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. .. versionadded:: 2014.7.0 Example: .. 
code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - allow_updates: True - hold: False :param bool pkg_verify: .. versionadded:: 2014.7.0 For requested packages that are already installed and would not be targeted for upgrade or downgrade, use pkg.verify to determine if any of the files installed by the package have been altered. If files have been altered, the reinstall option of pkg.install is used to force a reinstall. Types to ignore can be passed to pkg.verify. Additionally, ``verify_options`` can be used to modify further the behavior of pkg.verify. See examples below. Currently, this option is supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>`. Examples: .. code-block:: yaml httpd: pkg.installed: - version: 2.2.15-30.el6.centos - pkg_verify: True .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc - verify_options: - nodeps - nofiledigest :param list ignore_types: List of types to ignore when verifying the package .. versionadded:: 2014.7.0 :param list verify_options: List of additional options to pass when verifying the package. These options will be added to the ``rpm -V`` command, prepended with ``--`` (for example, when ``nodeps`` is passed in this option, ``rpm -V`` will be run with ``--nodeps``). .. versionadded:: 2016.11.0 :param bool normalize: Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2014.7.0 Example: .. 
code-block:: yaml gpfs.gplbin-2.6.32-279.31.1.el6.x86_64: pkg.installed: - normalize: False :param bool ignore_epoch: When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.installed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the package would be installed, but the state would report as failed because the actual installed version would be ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 | **MULTIPLE PACKAGE INSTALLATION OPTIONS: (not supported in pkgng)** :param list pkgs: A list of packages to install from a software repository. All packages listed under ``pkgs`` will be installed via a single command. .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar - baz - hold: True ``NOTE:`` For :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`winrepo <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`, version numbers can be specified in the ``pkgs`` argument. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz Additionally, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`zypper <salt.modules.zypper>`, :mod:`yum/dnf <salt.modules.yumpkg>`, and :mod:`apt <salt.modules.aptpkg>` support the ``<``, ``<=``, ``>=``, and ``>`` operators for more control over what versions will be installed. For example: .. 
code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: '>=1.2.3-4' - baz ``NOTE:`` When using comparison operators, the expression must be enclosed in quotes to avoid a YAML render error. With :mod:`ebuild <salt.modules.ebuild>` is also possible to specify a use flag list and/or if the given packages should be in package.accept_keywords file and/or the overlay from which you want the package to be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo: '~' - bar: '~>=1.2:slot::overlay[use,-otheruse]' - baz :param list sources: A list of packages to install, along with the source URI or local path from which to install each package. In the example below, ``foo``, ``bar``, ``baz``, etc. refer to the name of the package, as it would appear in the output of the ``pkg.version`` or ``pkg.list_pkgs`` salt CLI commands. .. code-block:: yaml mypkgs: pkg.installed: - sources: - foo: salt://rpms/foo.rpm - bar: http://somesite.org/bar.rpm - baz: ftp://someothersite.org/baz.rpm - qux: /minion/path/to/qux.rpm **PLATFORM-SPECIFIC ARGUMENTS** These are specific to each OS. If it does not apply to the execution module for your OS, it is ignored. :param bool hold: Force the package to be held at the current installed version. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2014.7.0 :param bool update_holds: If ``True``, and this function would update the package version, any packages which are being held will be temporarily unheld so that they can be updated. Otherwise, if this function attempts to update a held package, the held package(s) will be skipped and the state will fail. By default, this parameter is set to ``False``. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2016.11.0 :param list names: A list of packages to install from a software repository. Each package will be installed individually by the package manager. .. warning:: Unlike ``pkgs``, the ``names`` parameter cannot specify a version. 
        In addition, it makes a separate call to the package management
        frontend to install each package, whereas ``pkgs`` makes just a
        single call. It is therefore recommended to use ``pkgs`` instead of
        ``names`` to install multiple packages, both for the additional
        features and the performance improvement that it brings.

    :param bool install_recommends:
        Whether to install the packages marked as recommended. Default is
        ``True``. Currently only works with APT-based systems.

        .. versionadded:: 2015.5.0

        .. code-block:: yaml

            httpd:
              pkg.installed:
                - install_recommends: False

    :param bool only_upgrade:
        Only upgrade the packages, if they are already installed. Default is
        ``False``. Currently only works with APT-based systems.

        .. versionadded:: 2015.5.0

        .. code-block:: yaml

            httpd:
              pkg.installed:
                - only_upgrade: True

        .. note::
            If this parameter is set to True and the package is not already
            installed, the state will fail.

    :param bool report_reboot_exit_codes:
        If the installer exits with a recognized exit code indicating that a
        reboot is required, the module function
        *win_system.set_reboot_required_witnessed* will be called, preserving
        the knowledge of this event for the remainder of the current boot
        session. For the time being, ``3010`` is the only recognized exit
        code, but this is subject to future refinement. The value of this
        param defaults to ``True``. This parameter has no effect on
        non-Windows systems.

        .. versionadded:: 2016.11.0

        .. code-block:: yaml

            ms vcpp installed:
              pkg.installed:
                - name: ms-vcpp
                - version: 10.0.40219
                - report_reboot_exit_codes: False

    :param str bypass_file:
        If you wish to bypass the full package validation process, you can
        specify a file related to the installed package as a way to validate
        the package has already been installed. A good example would be a
        config file that is deployed with the package. Another bypass_file
        could be ``/run/salt-minion.pid``.

        .. code-block:: yaml

            install_ntp:
              pkg.installed:
                - name: ntp
                - bypass_file: /etc/ntp.conf

        The use case for this feature is when running salt at significant
        scale. Each state that has a requisite for a ``pkg.installed`` will
        have salt querying the package manager of the system. Compared to
        simple diff checks, querying the package manager is a lengthy
        process. This feature is an attempt to reduce the run time of
        states. If only a config change is being made but you wish to keep
        all of the self-resolving requisites, this bypasses the lengthy cost
        of the package manager. The assumption is that if this file is
        present, the package should already be installed.

    :param str bypass_file_contains:
        This option can only be used in conjunction with the ``bypass_file``
        option. It is to provide a second layer of validation before
        bypassing the ``pkg.installed`` process.

        .. code-block:: yaml

            install_ntp:
              pkg.installed:
                - name: ntp
                - bypass_file: /etc/ntp.conf
                - bypass_file_contains: version-20181218

        This will have salt check to see if the file contains the specified
        string. If the value is found, the ``pkg.installed`` process will be
        bypassed under the assumption that two pieces of validation have
        passed and the package is already installed.

        .. warning::
            Do not try and use ``{{ salt['pkg.version']('ntp') }}`` in a
            jinja template as part of your bypass_file_contains match. This
            will trigger a ``pkg.version`` lookup with the package manager
            and negate any time saved by trying to use the bypass feature.

    :return:
        A dictionary containing the state of the software installation
    :rtype dict:

    .. note::
        The ``pkg.installed`` state supports the usage of ``reload_modules``.
        This functionality allows you to force Salt to reload all modules. In
        many cases, Salt is clever enough to transparently reload the
        modules. For example, if you install a package, Salt reloads modules
        because some other module or state might require the package which
        was installed.
        However, there are some edge cases where this may not be the case,
        which is what ``reload_modules`` is meant to resolve.

        You should only use ``reload_modules`` if your ``pkg.installed`` does
        some sort of installation where if you do not reload the modules
        future items in your state which rely on the software being installed
        will fail. Please see the :ref:`Reloading Modules <reloading-modules>`
        documentation for more information.
    '''
    # An explicitly empty pkgs list means "nothing to do" and is a success.
    if not pkgs and isinstance(pkgs, list):
        return {'name': name,
                'changes': {},
                'result': True,
                'comment': 'No packages to install provided'}

    # If just a name (and optionally a version) is passed, just pack them into
    # the pkgs argument.
    if name and not any((pkgs, sources)):
        if version:
            pkgs = [{name: version}]
            version = None
        else:
            pkgs = [name]

    kwargs['saltenv'] = __env__
    # Honor the "only refresh once per salt run" bookkeeping.
    refresh = salt.utils.pkg.check_refresh(__opts__, refresh)

    # bypass_file short-circuit: skip the (expensive) package-manager query
    # entirely when the sentinel file exists (and, if requested, contains the
    # given marker string).
    if bypass_file is not None and bypass_file_contains is not None:
        if os.path.isfile(bypass_file):
            with salt.utils.fopen(bypass_file) as bypass_file_open:
                if bypass_file_contains in bypass_file_open.read():
                    return {'name': name,
                            'changes': {},
                            'result': True,
                            'comment': 'pkg.installed was bypassed as {} was present in {}'.format(bypass_file_contains, bypass_file)}

    if bypass_file is not None and bypass_file_contains is None:
        if os.path.isfile(bypass_file):
            return {'name': name,
                    'changes': {},
                    'result': True,
                    'comment': 'pkg.installed was bypassed as bypass_file {} was present'.format(bypass_file)}

    # check if capabilities should be checked and modify the requested packages
    # accordingly.
    if pkgs:
        pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs)

    # pkg_verify may be a bool or a list of options; normalize any other
    # truthy scalar to a plain bool.
    if not isinstance(pkg_verify, list):
        pkg_verify = pkg_verify is True
    if (pkg_verify or isinstance(pkg_verify, list)) \
            and 'pkg.verify' not in __salt__:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'pkg.verify not implemented'}

    # Version numbers given as ints/floats in YAML must become strings.
    if not isinstance(version, six.string_types) and version is not None:
        version = six.text_type(version)

    kwargs['allow_updates'] = allow_updates

    # _find_install_targets returns either a final state-return dict (when
    # there is nothing to do or an error occurred) or a 7-tuple of work to do.
    result = _find_install_targets(name, version, pkgs, sources,
                                   fromrepo=fromrepo,
                                   skip_suggestions=skip_suggestions,
                                   pkg_verify=pkg_verify,
                                   normalize=normalize,
                                   ignore_epoch=ignore_epoch,
                                   reinstall=reinstall,
                                   refresh=refresh,
                                   **kwargs)

    try:
        (desired, targets, to_unpurge, to_reinstall,
         altered_files, warnings, was_refreshed) = result
        if was_refreshed:
            refresh = False
    except ValueError:
        # _find_install_targets() found no targets or encountered an error

        # check that the hold function is available
        if 'pkg.hold' in __salt__ and 'hold' in kwargs:
            try:
                action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold'
                hold_ret = __salt__[action](
                    name=name, pkgs=pkgs, sources=sources
                )
            except (CommandExecutionError, SaltInvocationError) as exc:
                return {'name': name,
                        'changes': {},
                        'result': False,
                        'comment': six.text_type(exc)}

            if 'result' in hold_ret and not hold_ret['result']:
                return {'name': name,
                        'changes': {},
                        'result': False,
                        'comment': 'An error was encountered while '
                                   'holding/unholding package(s): {0}'
                                   .format(hold_ret['comment'])}
            else:
                # Fold the per-package hold results into the state return.
                modified_hold = [hold_ret[x] for x in hold_ret
                                 if hold_ret[x]['changes']]
                not_modified_hold = [hold_ret[x] for x in hold_ret
                                     if not hold_ret[x]['changes']
                                     and hold_ret[x]['result']]
                failed_hold = [hold_ret[x] for x in hold_ret
                               if not hold_ret[x]['result']]

                for i in modified_hold:
                    result['comment'] += '.\n{0}'.format(i['comment'])
                    result['result'] = i['result']
                    result['changes'][i['name']] = i['changes']

                for i in not_modified_hold:
                    result['comment'] += '.\n{0}'.format(i['comment'])
                    result['result'] = i['result']

                for i in failed_hold:
                    result['comment'] += '.\n{0}'.format(i['comment'])
                    result['result'] = i['result']
        return result

    if to_unpurge and 'lowpkg.unpurge' not in __salt__:
        ret = {'name': name,
               'changes': {},
               'result': False,
               'comment': 'lowpkg.unpurge not implemented'}
        if warnings:
            ret.setdefault('warnings', []).extend(warnings)
        return ret

    # Remove any targets not returned by _find_install_targets
    if pkgs:
        pkgs = [dict([(x, y)]) for x, y in six.iteritems(targets)]
        pkgs.extend([dict([(x, y)]) for x, y in six.iteritems(to_reinstall)])
    elif sources:
        oldsources = sources
        sources = [x for x in oldsources
                   if next(iter(list(x.keys()))) in targets]
        sources.extend([x for x in oldsources
                        if next(iter(list(x.keys()))) in to_reinstall])

    comment = []
    # Test mode: describe what would happen, result None, no changes made.
    if __opts__['test']:
        if targets:
            if sources:
                summary = ', '.join(targets)
            else:
                summary = ', '.join([_get_desired_pkg(x, targets)
                                     for x in targets])
            comment.append('The following packages would be '
                           'installed/updated: {0}'.format(summary))
        if to_unpurge:
            comment.append(
                'The following packages would have their selection status '
                'changed from \'purge\' to \'install\': {0}'
                .format(', '.join(to_unpurge))
            )
        if to_reinstall:
            # Add a comment for each package in to_reinstall with its
            # pkg.verify output
            if reinstall:
                reinstall_targets = []
                for reinstall_pkg in to_reinstall:
                    if sources:
                        reinstall_targets.append(reinstall_pkg)
                    else:
                        reinstall_targets.append(
                            _get_desired_pkg(reinstall_pkg, to_reinstall)
                        )
                msg = 'The following packages would be reinstalled: '
                msg += ', '.join(reinstall_targets)
                comment.append(msg)
            else:
                for reinstall_pkg in to_reinstall:
                    if sources:
                        pkgstr = reinstall_pkg
                    else:
                        pkgstr = _get_desired_pkg(reinstall_pkg, to_reinstall)
                    comment.append(
                        'Package \'{0}\' would be reinstalled because the '
                        'following files have been altered:'.format(pkgstr)
                    )
                    comment.append(
                        _nested_output(altered_files[reinstall_pkg])
                    )
        ret = {'name': name,
               'changes': {},
               'result': None,
               'comment': '\n'.join(comment)}
        if warnings:
            ret.setdefault('warnings', []).extend(warnings)
        return ret

    changes = {'installed': {}}
    modified_hold = None
    not_modified_hold = None
    failed_hold = None
    if targets or to_reinstall:
        try:
            pkg_ret = __salt__['pkg.install'](name=None,
                                              refresh=refresh,
                                              version=version,
                                              fromrepo=fromrepo,
                                              skip_verify=skip_verify,
                                              pkgs=pkgs,
                                              sources=sources,
                                              reinstall=bool(to_reinstall),
                                              normalize=normalize,
                                              update_holds=update_holds,
                                              ignore_epoch=ignore_epoch,
                                              **kwargs)
        except CommandExecutionError as exc:
            ret = {'name': name, 'result': False}
            if exc.info:
                # Get information for state return from the exception.
                ret['changes'] = exc.info.get('changes', {})
                ret['comment'] = exc.strerror_without_changes
            else:
                ret['changes'] = {}
                ret['comment'] = ('An error was encountered while installing '
                                  'package(s): {0}'.format(exc))
            if warnings:
                ret.setdefault('warnings', []).extend(warnings)
            return ret

        if refresh:
            refresh = False

        if isinstance(pkg_ret, dict):
            changes['installed'].update(pkg_ret)
        elif isinstance(pkg_ret, six.string_types):
            comment.append(pkg_ret)
            # Code below will be looking for a dictionary. If this is a string
            # it means that there was an exception raised and that no packages
            # changed, so now that we have added this error to the comments we
            # set this to an empty dictionary so that the code below which
            # checks reinstall targets works.
            pkg_ret = {}

    if 'pkg.hold' in __salt__ and 'hold' in kwargs:
        try:
            action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold'
            hold_ret = __salt__[action](
                name=name, pkgs=desired
            )
        except (CommandExecutionError, SaltInvocationError) as exc:
            comment.append(six.text_type(exc))
            ret = {'name': name,
                   'changes': changes,
                   'result': False,
                   'comment': '\n'.join(comment)}
            if warnings:
                ret.setdefault('warnings', []).extend(warnings)
            return ret
        else:
            if 'result' in hold_ret and not hold_ret['result']:
                ret = {'name': name,
                       'changes': {},
                       'result': False,
                       'comment': 'An error was encountered while '
                                  'holding/unholding package(s): {0}'
                                  .format(hold_ret['comment'])}
                if warnings:
                    ret.setdefault('warnings', []).extend(warnings)
                return ret
            else:
                modified_hold = [hold_ret[x] for x in hold_ret
                                 if hold_ret[x]['changes']]
                not_modified_hold = [hold_ret[x] for x in hold_ret
                                     if not hold_ret[x]['changes']
                                     and hold_ret[x]['result']]
                failed_hold = [hold_ret[x] for x in hold_ret
                               if not hold_ret[x]['result']]

    if to_unpurge:
        changes['purge_desired'] = __salt__['lowpkg.unpurge'](*to_unpurge)

    # Analyze pkg.install results for packages in targets
    if sources:
        modified = [x for x in changes['installed'] if x in targets]
        not_modified = [x for x in desired
                        if x not in targets
                        and x not in to_reinstall]
        failed = [x for x in targets if x not in modified]
    else:
        if __grains__['os'] == 'FreeBSD':
            kwargs['with_origin'] = True
        new_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
        if kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__:
            new_caps = __salt__['pkg.list_provides'](**kwargs)
        else:
            new_caps = {}
        ok, failed = _verify_install(desired, new_pkgs,
                                     ignore_epoch=ignore_epoch,
                                     new_caps=new_caps)
        modified = [x for x in ok if x in targets]
        not_modified = [x for x in ok
                        if x not in targets
                        and x not in to_reinstall]
        failed = [x for x in failed if x in targets]

    # If there was nothing unpurged, just set the changes dict to the contents
    # of changes['installed'].
if not changes.get('purge_desired'): changes = changes['installed'] if modified: if sources: summary = ', '.join(modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in modified]) if len(summary) < 20: comment.append('The following packages were installed/updated: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} installed/updated.'.format( len(modified), 's' if len(modified) > 1 else '', 'were' if len(modified) > 1 else 'was' ) ) if modified_hold: for i in modified_hold: change_name = i['name'] if change_name in changes: comment.append(i['comment']) if changes[change_name]['new']: changes[change_name]['new'] += '\n' changes[change_name]['new'] += '{0}'.format(i['changes']['new']) if changes[change_name]['old']: changes[change_name]['old'] += '\n' changes[change_name]['old'] += '{0}'.format(i['changes']['old']) else: comment.append(i['comment']) changes[change_name] = {} changes[change_name]['new'] = '{0}'.format(i['changes']['new']) # Any requested packages that were not targeted for install or reinstall if not_modified: if sources: summary = ', '.join(not_modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in not_modified]) if len(not_modified) <= 20: comment.append('The following packages were already installed: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} already installed'.format( len(not_modified), 's' if len(not_modified) > 1 else '', 'were' if len(not_modified) > 1 else 'was' ) ) if not_modified_hold: for i in not_modified_hold: comment.append(i['comment']) result = True if failed: if sources: summary = ', '.join(failed) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in failed]) comment.insert(0, 'The following packages failed to ' 'install/update: {0}'.format(summary)) result = False if failed_hold: for i in failed_hold: comment.append(i['comment']) result = False # Get the ignore_types list if any from the pkg_verify argument if 
isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] # Rerun pkg.verify for packages in to_reinstall to determine failed modified = [] failed = [] for reinstall_pkg in to_reinstall: if reinstall: if reinstall_pkg in pkg_ret: modified.append(reinstall_pkg) else: failed.append(reinstall_pkg) elif pkg_verify: # No need to wrap this in a try/except because we would already # have caught invalid arguments earlier. verify_result = __salt__['pkg.verify'](reinstall_pkg, ignore_types=ignore_types, verify_options=verify_options, **kwargs) if verify_result: failed.append(reinstall_pkg) altered_files[reinstall_pkg] = verify_result else: modified.append(reinstall_pkg) if modified: # Add a comment for each package in modified with its pkg.verify output for modified_pkg in modified: if sources: pkgstr = modified_pkg else: pkgstr = _get_desired_pkg(modified_pkg, desired) msg = 'Package {0} was reinstalled.'.format(pkgstr) if modified_pkg in altered_files: msg += ' The following files were remediated:' comment.append(msg) comment.append(_nested_output(altered_files[modified_pkg])) else: comment.append(msg) if failed: # Add a comment for each package in failed with its pkg.verify output for failed_pkg in failed: if sources: pkgstr = failed_pkg else: pkgstr = _get_desired_pkg(failed_pkg, desired) msg = ('Reinstall was not successful for package {0}.' 
def downloaded(name,
               version=None,
               pkgs=None,
               fromrepo=None,
               ignore_epoch=None,
               **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that a package (or list of packages) is present in the local
    package cache at the correct version, without installing it. Currently
    supported only for pkg providers that implement ``pkg.list_downloaded``
    (:mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper
    <salt.modules.zypper>`).

    :param str name:
        The package to download. Ignored when ``pkgs`` is used. Only
        packages available in a configured software repository can be
        downloaded.

    :param str version:
        Download a specific version of a package. For yum/dnf based
        distros, versions with a nonzero epoch must include the epoch
        (e.g. ``2:7.4.160-1.el7``) unless ``ignore_epoch`` is set. A
        version may also be given per-package inside ``pkgs``:

        .. code-block:: yaml

            common_packages:
              pkg.downloaded:
                - pkgs:
                  - unzip
                  - dos2unix
                  - salt-minion: 2015.8.5-1.el6

    :param bool resolve_capabilities:
        Turn on resolving capabilities. This allows one to name "provides"
        or alias names for packages.

        .. versionadded:: 2018.3.0

    CLI Example:

    .. code-block:: yaml

        zsh:
          pkg.downloaded:
            - version: 5.0.5-4.63
            - fromrepo: "myrepository"
    '''
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}

    # This state only works where the pkg module can report which packages
    # are already sitting in the download cache.
    if 'pkg.list_downloaded' not in __salt__:
        ret['result'] = False
        ret['comment'] = 'The pkg.downloaded state is not available on ' \
                         'this platform'
        return ret

    # An explicitly empty pkgs list is a successful no-op, not an error.
    if not pkgs and isinstance(pkgs, list):
        ret['result'] = True
        ret['comment'] = 'No packages to download provided'
        return ret

    # Fold the single-name (plus optional version) form into the pkgs form
    # so the rest of the function only deals with one shape.
    if name and not pkgs:
        if version:
            pkgs = [{name: version}]
            version = None
        else:
            pkgs = [name]

    # 'downloadonly' is forced to True below, so discard any caller value.
    kwargs.pop('downloadonly', None)

    pkgs, _refresh = _resolve_capabilities(pkgs, **kwargs)

    # Restrict the work to packages not yet present in the download cache.
    targets = _find_download_targets(name,
                                     version,
                                     pkgs,
                                     fromrepo=fromrepo,
                                     ignore_epoch=ignore_epoch,
                                     **kwargs)
    # A dict containing 'result' is a fully-formed state return from the
    # helper (e.g. "everything already downloaded"); pass it straight up.
    if isinstance(targets, dict) and 'result' in targets:
        return targets
    if not isinstance(targets, dict):
        ret['result'] = False
        ret['comment'] = 'An error was encountered while checking targets: ' \
                         '{0}'.format(targets)
        return ret

    if __opts__['test']:
        ret['comment'] = 'The following packages would be ' \
                         'downloaded: {0}'.format(', '.join(targets))
        return ret

    try:
        install_ret = __salt__['pkg.install'](name=name,
                                              pkgs=pkgs,
                                              version=version,
                                              downloadonly=True,
                                              fromrepo=fromrepo,
                                              ignore_epoch=ignore_epoch,
                                              **kwargs)
        ret['result'] = True
        ret['changes'].update(install_ret)
    except CommandExecutionError as exc:
        ret = {'name': name, 'result': False}
        if exc.info:
            # The exception carries partial state data; surface it.
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = 'An error was encountered while downloading ' \
                             'package(s): {0}'.format(exc)
        return ret

    # Confirm that each target actually landed in the download cache.
    cached = __salt__['pkg.list_downloaded'](**kwargs)
    ok, failed = _verify_install(targets, cached, ignore_epoch=ignore_epoch)

    if failed:
        ret['result'] = False
        ret['comment'] = 'The following packages failed to ' \
                         'download: {0}'.format(
                             ', '.join([_get_desired_pkg(x, targets)
                                        for x in failed]))

    if not ret['changes'] and not ret['comment']:
        ret['result'] = True
        ret['comment'] = 'Packages are already downloaded: ' \
                         '{0}'.format(', '.join(targets))

    return ret
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = 'An error was encountered while downloading ' \ 'package(s): {0}'.format(exc) return ret new_pkgs = __salt__['pkg.list_downloaded'](**kwargs) ok, failed = _verify_install(targets, new_pkgs, ignore_epoch=ignore_epoch) if failed: summary = ', '.join([_get_desired_pkg(x, targets) for x in failed]) ret['result'] = False ret['comment'] = 'The following packages failed to ' \ 'download: {0}'.format(summary) if not ret['changes'] and not ret['comment']: ret['result'] = True ret['comment'] = 'Packages are already downloaded: ' \ '{0}'.format(', '.join(targets)) return ret def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs): ''' .. versionadded:: 2017.7.0 Ensure that packages related to certain advisory ids are installed. Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` CLI Example: .. 
code-block:: yaml issue-foo-fixed: pkg.patch_installed: - advisory_ids: - SUSE-SLE-SERVER-12-SP2-2017-185 - SUSE-SLE-SERVER-12-SP2-2017-150 - SUSE-SLE-SERVER-12-SP2-2017-120 ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} if 'pkg.list_patches' not in __salt__: ret['result'] = False ret['comment'] = 'The pkg.patch_installed state is not available on ' \ 'this platform' return ret if not advisory_ids and isinstance(advisory_ids, list): ret['result'] = True ret['comment'] = 'No advisory ids provided' return ret # Only downloading not yet downloaded packages targets = _find_advisory_targets(name, advisory_ids, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, list): ret['result'] = False ret['comment'] = 'An error was encountered while checking targets: ' \ '{0}'.format(targets) return ret if __opts__['test']: summary = ', '.join(targets) ret['comment'] = 'The following advisory patches would be ' \ 'downloaded: {0}'.format(summary) return ret try: pkg_ret = __salt__['pkg.install'](name=name, advisory_ids=advisory_ids, downloadonly=downloadonly, **kwargs) ret['result'] = True ret['changes'].update(pkg_ret) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while downloading ' 'package(s): {0}'.format(exc)) return ret if not ret['changes'] and not ret['comment']: status = 'downloaded' if downloadonly else 'installed' ret['result'] = True ret['comment'] = ('Advisory patch is not needed or related packages ' 'are already {0}'.format(status)) return ret def patch_downloaded(name, advisory_ids=None, **kwargs): ''' .. versionadded:: 2017.7.0 Ensure that packages related to certain advisory ids are downloaded. 
def latest(
        name,
        refresh=None,
        fromrepo=None,
        skip_verify=False,
        pkgs=None,
        watch_flags=True,
        **kwargs):
    '''
    Ensure that the named package is installed and is the latest available
    version. If the package can be updated, this state will update it.
    Generally :mod:`pkg.installed <salt.states.pkg.installed>` is preferable,
    as ``latest`` upgrades the package every time a newer version appears.

    name
        The name of the package to maintain at the latest available version.
        Ignored if ``pkgs`` is used.

    fromrepo
        Specify a repository from which to install.

    skip_verify
        Skip the GPG verification check for the package to be installed.

    refresh
        Whether the package repo database is refreshed (``apt-get update``
        or equivalent) before the latest-version check. If unset, Salt
        refreshes at most once per ``pkg`` state run.

    :param str cache_valid_time:
        .. versionadded:: 2016.11.0

        Seconds after which the repo cache is considered stale, overriding
        the ``refresh`` default behavior. Debian-based distros only.

    :param bool resolve_capabilities:
        Turn on resolving capabilities ("provides"/alias names).

        .. versionadded:: 2018.3.0

    pkgs
        A list of packages to maintain at the latest available version.
        (Not yet supported for: FreeBSD, OpenBSD, MacOS, and Solaris
        pkgutil.)

    install_recommends
        Whether to install packages marked as recommended. Defaults to
        ``True``. APT-based systems only.

        .. versionadded:: 2015.5.0

    only_upgrade
        Only upgrade packages that are already installed (the state fails
        for packages that are absent). Defaults to ``False``. APT-based
        systems only.

        .. versionadded:: 2015.5.0

    report_reboot_exit_codes
        On Windows, when the installer exits with a recognized
        reboot-required exit code (currently ``3010``), record that fact
        via *win_system.set_reboot_required_witnessed*. Defaults to
        ``True``. No effect on non-Windows systems.

        .. versionadded:: 2016.11.0
    '''
    refresh = salt.utils.pkg.check_refresh(__opts__, refresh)

    if kwargs.get('sources'):
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'The "sources" parameter is not supported.'}

    if pkgs:
        desired_pkgs = list(_repack_pkgs(pkgs).keys())
        if not desired_pkgs:
            # Badly-formatted SLS
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'Invalidly formatted "pkgs" parameter. See '
                               'minion log.'}
    elif not pkgs and isinstance(pkgs, list):
        # Explicitly empty pkgs list: nothing to do.
        return {
            'name': name,
            'changes': {},
            'result': True,
            'comment': 'No packages to install provided'
        }
    else:
        desired_pkgs = [name]

    kwargs['saltenv'] = __env__

    # Resolve capability/alias names to real package names when requested.
    desired_pkgs, refresh = _resolve_capabilities(desired_pkgs,
                                                  refresh=refresh,
                                                  **kwargs)

    try:
        avail = __salt__['pkg.latest_version'](*desired_pkgs,
                                               fromrepo=fromrepo,
                                               refresh=refresh,
                                               **kwargs)
    except CommandExecutionError as exc:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'An error was encountered while checking the '
                           'newest available version of package(s): {0}'
                           .format(exc)}

    try:
        cur = __salt__['pkg.version'](*desired_pkgs, **kwargs)
    except CommandExecutionError as exc:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': exc.strerror}

    # The execution module returns a bare string when asked about a single
    # package; normalize to a dict so the loop below is uniform.
    if isinstance(cur, six.string_types):
        cur = {desired_pkgs[0]: cur}
    if isinstance(avail, six.string_types):
        avail = {desired_pkgs[0]: avail}

    targets = {}
    problems = []
    for pkg in desired_pkgs:
        if avail.get(pkg):
            # Package either a) is not installed, or b) is installed and has
            # an upgrade available
            targets[pkg] = avail[pkg]
        else:
            # Package either a) is up-to-date, or b) does not exist
            if not cur.get(pkg):
                # Package does not exist
                msg = 'No information found for \'{0}\'.'.format(pkg)
                log.error(msg)
                problems.append(msg)
            elif watch_flags \
                    and __grains__.get('os_family') == 'Gentoo' \
                    and __salt__['portage_config.is_changed_uses'](pkg):
                # Up-to-date, but Gentoo USE flags are changing, so the
                # package still needs to be rebuilt.
                targets[pkg] = cur[pkg]

    if problems:
        return {
            'name': name,
            'changes': {},
            'result': False,
            'comment': ' '.join(problems)
        }

    if not targets:
        # Everything requested is already at the latest version.
        if len(desired_pkgs) > 10:
            comment = 'All {0} packages are up-to-date.'.format(
                len(desired_pkgs))
        elif len(desired_pkgs) > 1:
            comment = 'All packages are up-to-date ' \
                      '({0}).'.format(', '.join(sorted(desired_pkgs)))
        else:
            comment = 'Package {0} is already ' \
                      'up-to-date'.format(desired_pkgs[0])
        return {'name': name,
                'changes': {},
                'result': True,
                'comment': comment}

    # Determine which of the requested packages are already current.
    if not pkgs:
        # A single targeted package that reached this point cannot have been
        # up-to-date, otherwise it would not be in targets.
        up_to_date = []
    else:
        # NOTE(review): pkgs entries may be {name: version} dicts; the
        # membership test below assumes plain names — confirm upstream.
        up_to_date = [x for x in pkgs if x not in targets]

    if __opts__['test']:
        comments = [
            'The following packages would be installed/upgraded: ' +
            ', '.join(sorted(targets))
        ]
        if up_to_date:
            up_to_date_count = len(up_to_date)
            if up_to_date_count <= 10:
                comments.append(
                    'The following packages are already up-to-date: ' +
                    ', '.join(
                        ['{0} ({1})'.format(x, cur[x])
                         for x in sorted(up_to_date)]
                    )
                )
            else:
                comments.append(
                    '{0} packages are already up-to-date'
                    .format(up_to_date_count)
                )
        return {'name': name,
                'changes': {},
                'result': None,
                'comment': '\n'.join(comments)}

    if salt.utils.platform.is_windows():
        # On Windows, pkg.install without a version only ensures the package
        # is present; it does not upgrade. Pin each target to its version.
        targeted_pkgs = [{x: targets[x]} for x in targets]
    else:
        # Plain name list; non-targeted packages are excluded.
        targeted_pkgs = list(targets)

    # No refresh needed here: if one was necessary it already happened when
    # pkg.latest_version ran above.
    try:
        changes = __salt__['pkg.install'](name=None,
                                          refresh=False,
                                          fromrepo=fromrepo,
                                          skip_verify=skip_verify,
                                          pkgs=targeted_pkgs,
                                          **kwargs)
    except CommandExecutionError as exc:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'An error was encountered while installing '
                           'package(s): {0}'.format(exc)}

    if changes:
        # Split the targets into failed and successful updates.
        # NOTE(review): the condition parses as
        # (not changes.get(x)) or (new != target and target != 'latest')
        # due to and/or precedence — confirm this matches the intent.
        failed = [x for x in targets
                  if not changes.get(x) or
                  changes[x].get('new') != targets[x] and
                  targets[x] != 'latest']
        successful = [x for x in targets if x not in failed]

        comments = []
        if failed:
            comments.append('The following packages failed to update: '
                            '{0}'.format(', '.join(sorted(failed))))
        if successful:
            comments.append('The following packages were successfully '
                            'installed/upgraded: '
                            '{0}'.format(', '.join(sorted(successful))))
        if up_to_date:
            if len(up_to_date) <= 10:
                comments.append('The following packages were already '
                                'up-to-date: '
                                '{0}'.format(', '.join(sorted(up_to_date))))
            else:
                comments.append('{0} packages were already up-to-date '
                                .format(len(up_to_date)))

        return {'name': name,
                'changes': changes,
                'result': False if failed else True,
                'comment': ' '.join(comments)}

    # pkg.install reported no changes at all: every target failed.
    if len(targets) > 10:
        comment = ('{0} targeted packages failed to update. '
                   'See debug log for details.'.format(len(targets)))
    elif len(targets) > 1:
        comment = ('The following targeted packages failed to update. '
                   'See debug log for details: ({0}).'
                   .format(', '.join(sorted(targets))))
    else:
        comment = 'Package {0} failed to ' \
                  'update.'.format(next(iter(list(targets.keys()))))
    if up_to_date:
        if len(up_to_date) <= 10:
            comment += ' The following packages were already ' \
                       'up-to-date: ' \
                       '{0}'.format(', '.join(sorted(up_to_date)))
        else:
            comment += '{0} packages were already ' \
                       'up-to-date'.format(len(up_to_date))

    return {'name': name,
            'changes': changes,
            'result': False,
            'comment': comment}
def _uninstall(
        action='remove',
        name=None,
        version=None,
        pkgs=None,
        normalize=True,
        ignore_epoch=False,
        **kwargs):
    '''
    Common implementation backing the ``removed`` and ``purged`` states.

    ``action`` must be ``'remove'`` or ``'purge'`` and selects which pkg
    execution function is invoked. Returns a standard state dict.
    '''
    if action not in ('remove', 'purge'):
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'Invalid action \'{0}\'. '
                           'This is probably a bug.'.format(action)}

    try:
        parsed = __salt__['pkg_resource.parse_targets'](
            name, pkgs, normalize=normalize)[0]
    except MinionError as exc:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'An error was encountered while parsing targets: '
                           '{0}'.format(exc)}

    targets = _find_remove_targets(name, version, pkgs, normalize,
                                   ignore_epoch=ignore_epoch, **kwargs)
    # A dict containing 'result' is a complete state return from the helper.
    if isinstance(targets, dict) and 'result' in targets:
        return targets
    if not isinstance(targets, list):
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'An error was encountered while checking targets: '
                           '{0}'.format(targets)}

    if action == 'purge':
        # Also target packages that are removed but still have config files
        # left behind ("partially installed").
        previously_removed = __salt__['pkg.list_pkgs'](versions_as_list=True,
                                                       removed=True,
                                                       **kwargs)
        targets.extend([x for x in parsed if x in previously_removed])
    targets.sort()

    if not targets:
        return {'name': name,
                'changes': {},
                'result': True,
                'comment': 'None of the targeted packages are installed'
                           '{0}'.format(' or partially installed'
                                        if action == 'purge' else '')}

    if __opts__['test']:
        return {'name': name,
                'changes': {},
                'result': None,
                'comment': 'The following packages will be {0}d: '
                           '{1}.'.format(action, ', '.join(targets))}

    changes = __salt__['pkg.{0}'.format(action)](name, pkgs=pkgs,
                                                 version=version, **kwargs)
    remaining = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)

    # Anything from the parsed target list still visible afterwards failed.
    failed = []
    for pkgname in parsed:
        if __grains__['os_family'] in ['Suse', 'RedHat']:
            # Check whether the specific version slated for removal is gone.
            if pkgname in remaining and not parsed[pkgname]:
                failed.append(pkgname)
            elif pkgname in remaining \
                    and parsed[pkgname] in remaining[pkgname]:
                failed.append(pkgname + "-" + parsed[pkgname])
        elif pkgname in remaining:
            failed.append(pkgname)

    if action == 'purge':
        # Purged packages must not even remain in the "removed" list.
        still_removed = __salt__['pkg.list_pkgs'](versions_as_list=True,
                                                  removed=True,
                                                  **kwargs)
        failed.extend([x for x in parsed if x in still_removed])
    failed.sort()

    if failed:
        return {'name': name,
                'changes': changes,
                'result': False,
                'comment': 'The following packages failed to {0}: '
                           '{1}.'.format(action, ', '.join(failed))}

    comments = []
    not_installed = sorted([x for x in parsed if x not in targets])
    if not_installed:
        comments.append('The following packages were not installed: '
                        '{0}'.format(', '.join(not_installed)))
        comments.append('The following packages were {0}d: '
                        '{1}.'.format(action, ', '.join(targets)))
    else:
        comments.append('All targeted packages were {0}d.'.format(action))

    return {'name': name,
            'changes': changes,
            'result': True,
            'comment': ' '.join(comments)}
'.join(failed))} comments = [] not_installed = sorted([x for x in pkg_params if x not in targets]) if not_installed: comments.append('The following packages were not installed: ' '{0}'.format(', '.join(not_installed))) comments.append('The following packages were {0}d: ' '{1}.'.format(action, ', '.join(targets))) else: comments.append('All targeted packages were {0}d.'.format(action)) return {'name': name, 'changes': changes, 'result': True, 'comment': ' '.join(comments)} def removed(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.remove`` if necessary to remove the package. name The name of the package to be removed. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
def purged(name,
           version=None,
           pkgs=None,
           normalize=True,
           ignore_epoch=False,
           **kwargs):
    '''
    Verify that a package is not installed, calling ``pkg.purge`` if
    necessary to purge the package. All configuration files are also
    removed.

    name
        The name of the package to be purged.

    version
        The version of the package that should be removed. Don't do anything
        if the package is installed with an unmatching version. For
        yum/dnf based distros, versions with a nonzero epoch must include
        the epoch (e.g. ``2:7.4.160-1.el7``) unless ``ignore_epoch`` is set.

    normalize : True
        Normalize the package name by removing the architecture, when it
        differs from the OS architecture. Disable for packages that carry
        the architecture as part of their actual name (e.g. kernel modules
        built for a specific kernel).

        .. versionadded:: 2015.8.0

    ignore_epoch : False
        Set to ``True`` to disregard a nonzero epoch when comparing the
        installed version against ``version``.

        .. versionadded:: 2015.8.9

    Multiple Package Options:

    pkgs
        A list of packages to purge. Must be passed as a python list. The
        ``name`` parameter will be ignored if this option is passed. It
        accepts version numbers as well.

        .. versionadded:: 0.16.0
    '''
    kwargs['saltenv'] = __env__
    try:
        # Identical to removed() except that the 'purge' action also drops
        # leftover configuration files.
        return _uninstall(action='purge', name=name, version=version,
                          pkgs=pkgs, normalize=normalize,
                          ignore_epoch=ignore_epoch, **kwargs)
    except CommandExecutionError as exc:
        ret = {'name': name, 'result': False}
        if exc.info:
            # The exception carries partial state data; surface it.
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = ('An error was encountered while purging '
                              'package(s): {0}'.format(exc))
        return ret
def uptodate(name, refresh=False, pkgs=None, **kwargs):
    '''
    .. versionadded:: 2014.7.0

    .. versionchanged:: 2018.3.0

        Added support for the ``pkgin`` provider.

    Verify that the system is completely up to date.

    name
        The name has no functional value and is only used as a tracking
        reference.

    refresh
        refresh the package database before checking for new upgrades

    pkgs
        list of packages to upgrade

    :param str cache_valid_time:
        Seconds after which the repo cache is considered stale, overriding
        the ``refresh`` default behavior. Debian-based distros only.

    :param bool resolve_capabilities:
        Turn on resolving capabilities ("provides"/alias names).

        .. versionadded:: 2018.3.0

    kwargs
        Any keyword arguments to pass through to ``pkg.upgrade``.

        .. versionadded:: 2015.5.0
    '''
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': 'Failed to update'}

    if 'pkg.list_upgrades' not in __salt__:
        ret['comment'] = 'State pkg.uptodate is not available'
        return ret

    # emerge --update doesn't appear to support repo notation
    if 'fromrepo' in kwargs and __grains__['os_family'] == 'Gentoo':
        ret['comment'] = '\'fromrepo\' argument not supported on this platform'
        return ret

    if not isinstance(refresh, bool):
        ret['comment'] = 'refresh must be either True or False'
        return ret

    pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs)
    try:
        packages = __salt__['pkg.list_upgrades'](refresh=refresh, **kwargs)
        # Record the old->new version pair for each pending upgrade so test
        # mode can report what would change.
        expected = {pkgname: {'new': pkgver,
                              'old': __salt__['pkg.version'](pkgname,
                                                             **kwargs)}
                    for pkgname, pkgver in six.iteritems(packages)}
        if isinstance(pkgs, list):
            # Restrict the upgrade set to the explicitly requested packages.
            packages = [pkg for pkg in packages if pkg in pkgs]
            expected = {pkgname: pkgver
                        for pkgname, pkgver in six.iteritems(expected)
                        if pkgname in pkgs}
    except Exception as exc:
        # Best-effort: surface whatever the provider raised as the comment.
        ret['comment'] = six.text_type(exc)
        return ret

    if not packages:
        ret['comment'] = 'System is already up-to-date'
        ret['result'] = True
        return ret

    if __opts__['test']:
        ret['comment'] = 'System update will be performed'
        ret['changes'] = expected
        ret['result'] = None
        return ret

    try:
        ret['changes'] = __salt__['pkg.upgrade'](refresh=refresh,
                                                 pkgs=pkgs,
                                                 **kwargs)
    except CommandExecutionError as exc:
        if exc.info:
            # The exception carries partial state data; surface it.
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = ('An error was encountered while updating '
                              'packages: {0}'.format(exc))
        return ret

    # When an explicit package list was provided, verify each one actually
    # got updated.
    missing = []
    if isinstance(pkgs, list):
        missing = [pkg for pkg in six.iterkeys(expected)
                   if pkg not in ret['changes']]

    if missing:
        ret['comment'] = 'The following package(s) failed to ' \
                         'update: {0}'.format(', '.join(missing))
        ret['result'] = False
    else:
        ret['comment'] = 'Upgrade ran successfully'
        ret['result'] = True

    return ret
def group_installed(name, skip=None, include=None, **kwargs):
    '''
    .. versionadded:: 2015.8.0

    .. versionchanged:: 2016.11.0
        Added support in :mod:`pacman <salt.modules.pacman>`

    Ensure that an entire package group is installed. Currently supported
    only for the :mod:`yum <salt.modules.yumpkg>` and :mod:`pacman
    <salt.modules.pacman>` package managers.

    skip
        "Default" packages of the group which should NOT be installed.

        .. code-block:: yaml

            Load Balancer:
              pkg.group_installed:
                - skip:
                  - piranha

    include
        "Optional" group packages to install in addition. Group membership
        is not enforced; non-member packages listed here are installed too.

        .. code-block:: yaml

            Load Balancer:
              pkg.group_installed:
                - include:
                  - haproxy

        .. versionchanged:: 2016.3.0
            Must be passed as a list (comma-separated strings are no longer
            accepted).

    .. note::
        This is essentially a wrapper around :py:func:`pkg.install
        <salt.modules.yumpkg.install>`; any argument accepted there may be
        passed here as well.
    '''
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}

    if 'pkg.group_diff' not in __salt__:
        ret['comment'] = 'pkg.group_install not available for this platform'
        return ret

    # Validate skip/include, coercing non-string entries in place.
    if skip is None:
        skip = []
    elif not isinstance(skip, list):
        ret['comment'] = 'skip must be formatted as a list'
        return ret
    else:
        for idx, item in enumerate(skip):
            if not isinstance(item, six.string_types):
                skip[idx] = six.text_type(item)

    if include is None:
        include = []
    elif not isinstance(include, list):
        ret['comment'] = 'include must be formatted as a list'
        return ret
    else:
        for idx, item in enumerate(include):
            if not isinstance(item, six.string_types):
                include[idx] = six.text_type(item)

    try:
        diff = __salt__['pkg.group_diff'](name)
    except CommandExecutionError as err:
        ret['comment'] = ('An error was encountered while installing/updating '
                          'group \'{0}\': {1}.'.format(name, err))
        return ret

    # Mandatory group members can never be skipped.
    mandatory = diff['mandatory']['installed'] + \
        diff['mandatory']['not installed']
    invalid_skip = [x for x in mandatory if x in skip]
    if invalid_skip:
        ret['comment'] = (
            'The following mandatory packages cannot be skipped: {0}'
            .format(', '.join(invalid_skip))
        )
        return ret

    # Install everything mandatory, the non-skipped defaults, and any
    # explicitly included optional packages.
    targets = diff['mandatory']['not installed']
    targets.extend([x for x in diff['default']['not installed']
                    if x not in skip])
    targets.extend(include)

    if not targets:
        ret['result'] = True
        ret['comment'] = 'Group \'{0}\' is already installed'.format(name)
        return ret

    partially_installed = diff['mandatory']['installed'] \
        or diff['default']['installed'] \
        or diff['optional']['installed']

    if __opts__['test']:
        ret['result'] = None
        if partially_installed:
            ret['comment'] = (
                'Group \'{0}\' is partially installed and will be updated'
                .format(name)
            )
        else:
            ret['comment'] = 'Group \'{0}\' will be installed'.format(name)
        return ret

    try:
        ret['changes'] = __salt__['pkg.install'](pkgs=targets, **kwargs)
    except CommandExecutionError as exc:
        ret = {'name': name, 'result': False}
        if exc.info:
            # The exception carries partial state data; surface it.
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = ('An error was encountered while '
                              'installing/updating group \'{0}\': {1}'
                              .format(name, exc))
        return ret

    # Confirm each target is now present on the system.
    failed = [x for x in targets
              if x not in __salt__['pkg.list_pkgs'](**kwargs)]
    if failed:
        ret['comment'] = (
            'Failed to install the following packages: {0}'
            .format(', '.join(failed))
        )
        return ret

    ret['result'] = True
    ret['comment'] = 'Group \'{0}\' was {1}'.format(
        name,
        'updated' if partially_installed else 'installed'
    )
    return ret
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while ' 'installing/updating group \'{0}\': {1}' .format(name, exc)) return ret failed = [x for x in targets if x not in __salt__['pkg.list_pkgs'](**kwargs)] if failed: ret['comment'] = ( 'Failed to install the following packages: {0}' .format(', '.join(failed)) ) return ret ret['result'] = True ret['comment'] = 'Group \'{0}\' was {1}'.format( name, 'updated' if partially_installed else 'installed' ) return ret def mod_init(low): ''' Set a flag to tell the install functions to refresh the package database. This ensures that the package database is refreshed only once during a state run significantly improving the speed of package management during a state run. It sets a flag for a number of reasons, primarily due to timeline logic. When originally setting up the mod_init for pkg a number of corner cases arose with different package managers and how they refresh package data. It also runs the "ex_mod_init" from the package manager module that is currently loaded. The "ex_mod_init" is expected to work as a normal "mod_init" function. .. 
seealso:: :py:func:`salt.modules.ebuild.ex_mod_init` ''' ret = True if 'pkg.ex_mod_init' in __salt__: ret = __salt__['pkg.ex_mod_init'](low) if low['fun'] == 'installed' or low['fun'] == 'latest': salt.utils.pkg.write_rtag(__opts__) return ret return False def mod_aggregate(low, chunks, running): ''' The mod_aggregate function which looks up all packages in the available low chunks and merges them into a single pkgs ref in the present low data ''' pkgs = [] pkg_type = None agg_enabled = [ 'installed', 'latest', 'removed', 'purged', ] if low.get('fun') not in agg_enabled: return low for chunk in chunks: tag = __utils__['state.gen_tag'](chunk) if tag in running: # Already ran the pkg state, skip aggregation continue if chunk.get('state') == 'pkg': if '__agg__' in chunk: continue # Check for the same function if chunk.get('fun') != low.get('fun'): continue # Check for the same repo if chunk.get('fromrepo') != low.get('fromrepo'): continue # Check first if 'sources' was passed so we don't aggregate pkgs # and sources together. if 'sources' in chunk: if pkg_type is None: pkg_type = 'sources' if pkg_type == 'sources': pkgs.extend(chunk['sources']) chunk['__agg__'] = True else: if pkg_type is None: pkg_type = 'pkgs' if pkg_type == 'pkgs': # Pull out the pkg names! if 'pkgs' in chunk: pkgs.extend(chunk['pkgs']) chunk['__agg__'] = True elif 'name' in chunk: version = chunk.pop('version', None) if version is not None: pkgs.append({chunk['name']: version}) else: pkgs.append(chunk['name']) chunk['__agg__'] = True if pkg_type is not None and pkgs: if pkg_type in low: low[pkg_type].extend(pkgs) else: low[pkg_type] = pkgs return low def mod_watch(name, **kwargs): ''' Install/reinstall a package based on a watch requisite .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered. 
''' sfun = kwargs.pop('sfun', None) mapfun = {'purged': purged, 'latest': latest, 'removed': removed, 'installed': installed} if sfun in mapfun: return mapfun[sfun](name, **kwargs) return {'name': name, 'changes': {}, 'comment': 'pkg.{0} does not work with the watch requisite'.format(sfun), 'result': False}
saltstack/salt
salt/states/pkg.py
_preflight_check
python
def _preflight_check(desired, fromrepo, **kwargs):
    '''
    Run the platform-specific package database check (``pkg.check_db``)
    against the desired packages.

    Returns a dict with two keys:

    - ``suggest``: maps each missing package name to the list of possible
      alternative names the package db suggested
    - ``no_suggest``: missing package names for which no suggestions exist

    Returns an empty dict when the minion's pkg module does not implement
    ``pkg.check_db`` (i.e. no preflight check is possible on this platform).
    '''
    # Platforms without a check_db implementation have nothing to verify.
    if 'pkg.check_db' not in __salt__:
        return {}

    results = {'suggest': {}, 'no_suggest': []}
    db_info = __salt__['pkg.check_db'](
        *list(desired.keys()), fromrepo=fromrepo, **kwargs
    )
    for pkgname, info in db_info.items():
        # Only packages the db could not find are interesting here.
        if info['found'] is False:
            suggestions = info['suggestions']
            if suggestions:
                results['suggest'][pkgname] = suggestions
            else:
                results['no_suggest'].append(pkgname)
    return results
Perform platform-specific checks on desired packages
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pkg.py#L901-L917
null
# -*- coding: utf-8 -*- ''' Installation of packages using OS package managers such as yum or apt-get ========================================================================= .. note:: On minions running systemd>=205, as of version 2015.8.12, 2016.3.3, and 2016.11.0, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing the package manager commands spawned by Salt, when Salt updates itself (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.use_scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Salt can manage software packages via the pkg state module, packages can be set up to be installed, latest, removed and purged. Package management declarations are typically rather simple: .. code-block:: yaml vim: pkg.installed A more involved example involves pulling from a custom repository. .. code-block:: yaml base: pkgrepo.managed: - humanname: Logstash PPA - name: ppa:wolfnet/logstash - dist: precise - file: /etc/apt/sources.list.d/logstash.list - keyid: 28B04E4A - keyserver: keyserver.ubuntu.com logstash: pkg.installed: - fromrepo: ppa:wolfnet/logstash Multiple packages can also be installed with the use of the pkgs state module .. code-block:: yaml dotdeb.repo: pkgrepo.managed: - humanname: Dotdeb - name: deb http://packages.dotdeb.org wheezy-php55 all - dist: wheezy-php55 - file: /etc/apt/sources.list.d/dotbeb.list - keyid: 89DF5277 - keyserver: keys.gnupg.net - refresh_db: true php.packages: pkg.installed: - fromrepo: wheezy-php55 - pkgs: - php5-fpm - php5-cli - php5-curl .. 
warning:: Package names are currently case-sensitive. If the minion is using a package manager which is not case-sensitive (such as :mod:`pkgng <salt.modules.pkgng>`), then this state will fail if the proper case is not used. This will be addressed in a future release of Salt. ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import fnmatch import logging import os import re # Import Salt libs import salt.utils.pkg import salt.utils.platform import salt.utils.versions from salt.output import nested from salt.utils.functools import namespaced_function as _namespaced_function from salt.utils.odict import OrderedDict as _OrderedDict from salt.exceptions import ( CommandExecutionError, MinionError, SaltInvocationError ) from salt.modules.pkg_resource import _repack_pkgs # Import 3rd-party libs from salt.ext import six # pylint: disable=invalid-name _repack_pkgs = _namespaced_function(_repack_pkgs, globals()) if salt.utils.platform.is_windows(): # pylint: disable=import-error,no-name-in-module,unused-import from salt.ext.six.moves.urllib.parse import urlparse as _urlparse from salt.exceptions import SaltRenderError import collections import datetime import errno import time from functools import cmp_to_key # pylint: disable=import-error # pylint: enable=unused-import from salt.modules.win_pkg import _get_package_info from salt.modules.win_pkg import get_repo_data from salt.modules.win_pkg import _get_repo_details from salt.modules.win_pkg import _refresh_db_conditional from salt.modules.win_pkg import refresh_db from salt.modules.win_pkg import genrepo from salt.modules.win_pkg import _repo_process_pkg_sls from salt.modules.win_pkg import _get_latest_pkg_version from salt.modules.win_pkg import _reverse_cmp_pkg_versions _get_package_info = _namespaced_function(_get_package_info, globals()) get_repo_data = _namespaced_function(get_repo_data, globals()) _get_repo_details = \ _namespaced_function(_get_repo_details, globals()) 
_refresh_db_conditional = \ _namespaced_function(_refresh_db_conditional, globals()) refresh_db = _namespaced_function(refresh_db, globals()) genrepo = _namespaced_function(genrepo, globals()) _repo_process_pkg_sls = \ _namespaced_function(_repo_process_pkg_sls, globals()) _get_latest_pkg_version = \ _namespaced_function(_get_latest_pkg_version, globals()) _reverse_cmp_pkg_versions = \ _namespaced_function(_reverse_cmp_pkg_versions, globals()) # The following imports are used by the namespaced win_pkg funcs # and need to be included in their globals. # pylint: disable=import-error,unused-import from salt.utils.versions import LooseVersion # pylint: enable=import-error,unused-import # pylint: enable=invalid-name log = logging.getLogger(__name__) def __virtual__(): ''' Only make these states available if a pkg provider has been detected or assigned for this minion ''' return 'pkg.install' in __salt__ def _get_comparison_spec(pkgver): ''' Return a tuple containing the comparison operator and the version. If no comparison operator was passed, the comparison is assumed to be an "equals" comparison, and "==" will be the operator returned. ''' oper, verstr = salt.utils.pkg.split_comparison(pkgver.strip()) if oper in ('=', ''): oper = '==' return oper, verstr def _parse_version_string(version_conditions_string): ''' Returns a list of two-tuples containing (operator, version). ''' result = [] version_conditions_string = version_conditions_string.strip() if not version_conditions_string: return result for version_condition in version_conditions_string.split(','): operator_and_version = _get_comparison_spec(version_condition) result.append(operator_and_version) return result def _fulfills_version_string(installed_versions, version_conditions_string, ignore_epoch=False, allow_updates=False): ''' Returns True if any of the installed versions match the specified version conditions, otherwise returns False. 
installed_versions The installed versions version_conditions_string The string containing all version conditions. E.G. 1.2.3-4 >=1.2.3-4 >=1.2.3-4, <2.3.4-5 >=1.2.3-4, <2.3.4-5, !=1.2.4-1 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. allow_updates : False Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. (Only applicable if only one strict version condition is specified E.G. version: 2.0.6~ubuntu3) ''' version_conditions = _parse_version_string(version_conditions_string) for installed_version in installed_versions: fullfills_all = True for operator, version_string in version_conditions: if allow_updates and len(version_conditions) == 1 and operator == '==': operator = '>=' fullfills_all = fullfills_all and _fulfills_version_spec([installed_version], operator, version_string, ignore_epoch=ignore_epoch) if fullfills_all: return True return False def _fulfills_version_spec(versions, oper, desired_version, ignore_epoch=False): ''' Returns True if any of the installed versions match the specified version, otherwise returns False ''' cmp_func = __salt__.get('pkg.version_cmp') # stripping "with_origin" dict wrapper if salt.utils.platform.is_freebsd(): if isinstance(versions, dict) and 'version' in versions: versions = versions['version'] for ver in versions: if (oper == '==' and fnmatch.fnmatch(ver, desired_version)) \ or salt.utils.versions.compare(ver1=ver, oper=oper, ver2=desired_version, cmp_func=cmp_func, ignore_epoch=ignore_epoch): return True return False def _find_unpurge_targets(desired, **kwargs): ''' Find packages which are marked to be purged but can't yet be removed because they are dependencies for 
other installed packages. These are the packages which will need to be 'unpurged' because they are part of pkg.installed states. This really just applies to Debian-based Linuxes. ''' return [ x for x in desired if x in __salt__['pkg.list_pkgs'](purge_desired=True, **kwargs) ] def _find_download_targets(name=None, version=None, pkgs=None, normalize=True, skip_suggestions=False, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.downloaded and discover what packages need to be downloaded. Return a dict of packages to download. ''' cur_pkgs = __salt__['pkg.list_downloaded'](**kwargs) if pkgs: to_download = _repack_pkgs(pkgs, normalize=normalize) if not to_download: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. See ' 'minion log.'} else: if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_download = {_normalize_name(name): version} else: to_download = {name: version} cver = cur_pkgs.get(name, {}) if name in to_download: # Package already downloaded, no need to download again if cver and version in cver: return {'name': name, 'changes': {}, 'result': True, 'comment': 'Version {0} of package \'{1}\' is already ' 'downloaded'.format(version, name)} # if cver is not an empty string, the package is already downloaded elif cver and version is None: # The package is downloaded return {'name': name, 'changes': {}, 'result': True, 'comment': 'Package {0} is already ' 'downloaded'.format(name)} version_spec = False if not skip_suggestions: try: problems = _preflight_check(to_download, **kwargs) except CommandExecutionError: pass else: comments = [] if problems.get('no_suggest'): comments.append( 'The following package(s) were not found, and no ' 'possible matches were found in the package db: ' '{0}'.format( ', '.join(sorted(problems['no_suggest'])) ) ) if problems.get('suggest'): for pkgname, suggestions in \ six.iteritems(problems['suggest']): 
comments.append( 'Package \'{0}\' not found (possible matches: ' '{1})'.format(pkgname, ', '.join(suggestions)) ) if comments: if len(comments) > 1: comments.append('') return {'name': name, 'changes': {}, 'result': False, 'comment': '. '.join(comments).rstrip()} # Find out which packages will be targeted in the call to pkg.download # Check current downloaded versions against specified versions targets = {} problems = [] for pkgname, pkgver in six.iteritems(to_download): cver = cur_pkgs.get(pkgname, {}) # Package not yet downloaded, so add to targets if not cver: targets[pkgname] = pkgver continue # No version specified but package is already downloaded elif cver and not pkgver: continue version_spec = True try: if not _fulfills_version_string(cver.keys(), pkgver, ignore_epoch=ignore_epoch): targets[pkgname] = pkgver except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already downloaded msg = ( 'All specified packages{0} are already downloaded' .format(' (matching specified versions)' if version_spec else '') ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_advisory_targets(name=None, advisory_ids=None, **kwargs): ''' Inspect the arguments to pkg.patch_installed and discover what advisory patches need to be installed. Return a dict of advisory patches to install. 
''' cur_patches = __salt__['pkg.list_installed_patches'](**kwargs) if advisory_ids: to_download = advisory_ids else: to_download = [name] if cur_patches.get(name, {}): # Advisory patch already installed, no need to install it again return {'name': name, 'changes': {}, 'result': True, 'comment': 'Advisory patch {0} is already ' 'installed'.format(name)} # Find out which advisory patches will be targeted in the call to pkg.install targets = [] for patch_name in to_download: cver = cur_patches.get(patch_name, {}) # Advisory patch not yet installed, so add to targets if not cver: targets.append(patch_name) continue if not targets: # All specified packages are already downloaded msg = ('All specified advisory patches are already installed') return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_remove_targets(name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.removed and discover what packages need to be removed. Return a dict of packages to remove. ''' if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if pkgs: to_remove = _repack_pkgs(pkgs, normalize=normalize) if not to_remove: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. 
See ' 'minion log.'} else: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_remove = {_normalize_name(name): version} version_spec = False # Find out which packages will be targeted in the call to pkg.remove # Check current versions against specified versions targets = [] problems = [] for pkgname, pkgver in six.iteritems(to_remove): # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names origin = bool(re.search('/', pkgname)) if __grains__['os'] == 'FreeBSD' and origin: cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == pkgname] else: cver = cur_pkgs.get(pkgname, []) # Package not installed, no need to remove if not cver: continue # No version specified and pkg is installed elif __salt__['pkg_resource.version_clean'](pkgver) is None: targets.append(pkgname) continue version_spec = True try: if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch): targets.append(pkgname) else: log.debug( 'Current version (%s) did not match desired version ' 'specification (%s), will not remove', cver, pkgver ) except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already absent msg = 'All specified packages{0} are already absent'.format( ' (matching specified versions)' if version_spec else '' ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_install_targets(name=None, version=None, pkgs=None, sources=None, skip_suggestions=False, pkg_verify=False, normalize=True, ignore_epoch=False, reinstall=False, refresh=False, **kwargs): ''' Inspect the arguments to pkg.installed and discover what packages need to be installed. 
Return a dict of desired packages ''' was_refreshed = False if all((pkgs, sources)): return {'name': name, 'changes': {}, 'result': False, 'comment': 'Only one of "pkgs" and "sources" is permitted.'} # dict for packages that fail pkg.verify and their altered files altered_files = {} # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True if salt.utils.platform.is_windows(): # Windows requires a refresh to establish a pkg db if refresh=True, so # add it to the kwargs. 
kwargs['refresh'] = refresh resolve_capabilities = kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__ try: cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) cur_prov = resolve_capabilities and __salt__['pkg.list_provides'](**kwargs) or dict() except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': exc.strerror} if salt.utils.platform.is_windows() and kwargs.pop('refresh', False): # We already refreshed when we called pkg.list_pkgs was_refreshed = True refresh = False if any((pkgs, sources)): if pkgs: desired = _repack_pkgs(pkgs, normalize=normalize) elif sources: desired = __salt__['pkg_resource.pack_sources']( sources, normalize=normalize, ) if not desired: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted \'{0}\' parameter. See ' 'minion log.'.format('pkgs' if pkgs else 'sources')} to_unpurge = _find_unpurge_targets(desired, **kwargs) else: if salt.utils.platform.is_windows(): pkginfo = _get_package_info(name, saltenv=kwargs['saltenv']) if not pkginfo: return {'name': name, 'changes': {}, 'result': False, 'comment': 'Package {0} not found in the ' 'repository.'.format(name)} if version is None: version = _get_latest_pkg_version(pkginfo) if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) desired = {_normalize_name(name): version} else: desired = {name: version} to_unpurge = _find_unpurge_targets(desired, **kwargs) # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names origin = bool(re.search('/', name)) if __grains__['os'] == 'FreeBSD' and origin: cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == name] else: cver = cur_pkgs.get(name, []) if name not in to_unpurge: if version and version in cver \ and not reinstall \ and not pkg_verify: # The package is installed and is the correct version return {'name': name, 'changes': {}, 'result': True, 
'comment': 'Version {0} of package \'{1}\' is already ' 'installed'.format(version, name)} # if cver is not an empty string, the package is already installed elif cver and version is None \ and not reinstall \ and not pkg_verify: # The package is installed return {'name': name, 'changes': {}, 'result': True, 'comment': 'Package {0} is already ' 'installed'.format(name)} version_spec = False if not sources: # Check for alternate package names if strict processing is not # enforced. Takes extra time. Disable for improved performance if not skip_suggestions: # Perform platform-specific pre-flight checks not_installed = dict([ (name, version) for name, version in desired.items() if not (name in cur_pkgs and (version is None or _fulfills_version_string(cur_pkgs[name], version))) ]) if not_installed: try: problems = _preflight_check(not_installed, **kwargs) except CommandExecutionError: pass else: comments = [] if problems.get('no_suggest'): comments.append( 'The following package(s) were not found, and no ' 'possible matches were found in the package db: ' '{0}'.format( ', '.join(sorted(problems['no_suggest'])) ) ) if problems.get('suggest'): for pkgname, suggestions in \ six.iteritems(problems['suggest']): comments.append( 'Package \'{0}\' not found (possible matches: ' '{1})'.format(pkgname, ', '.join(suggestions)) ) if comments: if len(comments) > 1: comments.append('') return {'name': name, 'changes': {}, 'result': False, 'comment': '. '.join(comments).rstrip()} # Resolve the latest package version for any packages with "latest" in the # package version wants_latest = [] \ if sources \ else [x for x, y in six.iteritems(desired) if y == 'latest'] if wants_latest: resolved_latest = __salt__['pkg.latest_version'](*wants_latest, refresh=refresh, **kwargs) if len(wants_latest) == 1: resolved_latest = {wants_latest[0]: resolved_latest} if refresh: was_refreshed = True refresh = False # pkg.latest_version returns an empty string when the package is # up-to-date. 
So check the currently-installed packages. If found, the # resolved latest version will be the currently installed one from # cur_pkgs. If not found, then the package doesn't exist and the # resolved latest version will be None. for key in resolved_latest: if not resolved_latest[key]: if key in cur_pkgs: resolved_latest[key] = cur_pkgs[key][-1] else: resolved_latest[key] = None # Update the desired versions with the ones we resolved desired.update(resolved_latest) # Find out which packages will be targeted in the call to pkg.install targets = {} to_reinstall = {} problems = [] warnings = [] failed_verify = False for package_name, version_string in six.iteritems(desired): cver = cur_pkgs.get(package_name, []) if resolve_capabilities and not cver and package_name in cur_prov: cver = cur_pkgs.get(cur_prov.get(package_name)[0], []) # Package not yet installed, so add to targets if not cver: targets[package_name] = version_string continue if sources: if reinstall: to_reinstall[package_name] = version_string continue elif 'lowpkg.bin_pkg_info' not in __salt__: continue # Metadata parser is available, cache the file and derive the # package's name and version err = 'Unable to cache {0}: {1}' try: cached_path = __salt__['cp.cache_file'](version_string, saltenv=kwargs['saltenv']) except CommandExecutionError as exc: problems.append(err.format(version_string, exc)) continue if not cached_path: problems.append(err.format(version_string, 'file not found')) continue elif not os.path.exists(cached_path): problems.append('{0} does not exist on minion'.format(version_string)) continue source_info = __salt__['lowpkg.bin_pkg_info'](cached_path) if source_info is None: warnings.append('Failed to parse metadata for {0}'.format(version_string)) continue else: verstr = source_info['version'] else: verstr = version_string if reinstall: to_reinstall[package_name] = version_string continue if not __salt__['pkg_resource.check_extra_requirements'](package_name, version_string): 
targets[package_name] = version_string continue # No version specified and pkg is installed elif __salt__['pkg_resource.version_clean'](version_string) is None: if (not reinstall) and pkg_verify: try: verify_result = __salt__['pkg.verify']( package_name, ignore_types=ignore_types, verify_options=verify_options, **kwargs ) except (CommandExecutionError, SaltInvocationError) as exc: failed_verify = exc.strerror continue if verify_result: to_reinstall[package_name] = version_string altered_files[package_name] = verify_result continue version_fulfilled = False allow_updates = bool(not sources and kwargs.get('allow_updates')) try: version_fulfilled = _fulfills_version_string(cver, verstr, ignore_epoch=ignore_epoch, allow_updates=allow_updates) except CommandExecutionError as exc: problems.append(exc.strerror) continue # Compare desired version against installed version. version_spec = True if not version_fulfilled: if reinstall: to_reinstall[package_name] = version_string else: version_conditions = _parse_version_string(version_string) if pkg_verify and any(oper == '==' for oper, version in version_conditions): try: verify_result = __salt__['pkg.verify']( package_name, ignore_types=ignore_types, verify_options=verify_options, **kwargs ) except (CommandExecutionError, SaltInvocationError) as exc: failed_verify = exc.strerror continue if verify_result: to_reinstall[package_name] = version_string altered_files[package_name] = verify_result else: log.debug( 'Current version (%s) did not match desired version ' 'specification (%s), adding to installation targets', cver, version_string ) targets[package_name] = version_string if failed_verify: problems.append(failed_verify) if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not any((targets, to_unpurge, to_reinstall)): # All specified packages are installed msg = 'All specified packages are already installed{0}' msg = msg.format( ' and are at the desired version' if 
version_spec and not sources else '' ) ret = {'name': name, 'changes': {}, 'result': True, 'comment': msg} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret return (desired, targets, to_unpurge, to_reinstall, altered_files, warnings, was_refreshed) def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None): ''' Determine whether or not the installed packages match what was requested in the SLS file. ''' ok = [] failed = [] if not new_caps: new_caps = dict() for pkgname, pkgver in desired.items(): # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names. # Homebrew for Mac OSX does something similar with tap names # prefixing package names, separated with a slash. has_origin = '/' in pkgname if __grains__['os'] == 'FreeBSD' and has_origin: cver = [k for k, v in six.iteritems(new_pkgs) if v['origin'] == pkgname] elif __grains__['os'] == 'MacOS' and has_origin: cver = new_pkgs.get(pkgname, new_pkgs.get(pkgname.split('/')[-1])) elif __grains__['os'] == 'OpenBSD': cver = new_pkgs.get(pkgname.split('%')[0]) elif __grains__['os_family'] == 'Debian': cver = new_pkgs.get(pkgname.split('=')[0]) else: cver = new_pkgs.get(pkgname) if not cver and pkgname in new_caps: cver = new_pkgs.get(new_caps.get(pkgname)[0]) if not cver: failed.append(pkgname) continue elif pkgver == 'latest': ok.append(pkgname) continue elif not __salt__['pkg_resource.version_clean'](pkgver): ok.append(pkgname) continue elif pkgver.endswith("*") and cver[0].startswith(pkgver[:-1]): ok.append(pkgname) continue if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch): ok.append(pkgname) else: failed.append(pkgname) return ok, failed def _get_desired_pkg(name, desired): ''' Helper function that retrieves and nicely formats the desired pkg (and version if specified) so that helpful information can be printed in the comment for the state. 
''' if not desired[name] or desired[name].startswith(('<', '>', '=')): oper = '' else: oper = '=' return '{0}{1}{2}'.format(name, oper, '' if not desired[name] else desired[name]) def _nested_output(obj): ''' Serialize obj and format for output ''' nested.__opts__ = __opts__ ret = nested.output(obj).rstrip() return ret def _resolve_capabilities(pkgs, refresh=False, **kwargs): ''' Resolve capabilities in ``pkgs`` and exchange them with real package names, when the result is distinct. This feature can be turned on while setting the paramter ``resolve_capabilities`` to True. Return the input dictionary with replaced capability names and as second return value a bool which say if a refresh need to be run. In case of ``resolve_capabilities`` is False (disabled) or not supported by the implementation the input is returned unchanged. ''' if not pkgs or 'pkg.resolve_capabilities' not in __salt__: return pkgs, refresh ret = __salt__['pkg.resolve_capabilities'](pkgs, refresh=refresh, **kwargs) return ret, False def installed( name, version=None, refresh=None, fromrepo=None, skip_verify=False, skip_suggestions=False, pkgs=None, sources=None, allow_updates=False, pkg_verify=False, normalize=True, ignore_epoch=False, reinstall=False, update_holds=False, bypass_file=None, bypass_file_contains=None, **kwargs): ''' Ensure that the package is installed, and that it is the correct version (if specified). :param str name: The name of the package to be installed. This parameter is ignored if either "pkgs" or "sources" is used. Additionally, please note that this option can only be used to install packages from a software repository. To install a package file manually, use the "sources" option detailed below. :param str version: Install a specific version of a package. This option is ignored if "sources" is used. 
def installed(
        name,
        version=None,
        refresh=None,
        fromrepo=None,
        skip_verify=False,
        skip_suggestions=False,
        pkgs=None,
        sources=None,
        allow_updates=False,
        pkg_verify=False,
        normalize=True,
        ignore_epoch=False,
        reinstall=False,
        update_holds=False,
        bypass_file=None,
        bypass_file_contains=None,
        **kwargs):
    '''
    Ensure that the package is installed, and that it is the correct version
    (if specified).

    :param str name:
        The name of the package to be installed. Ignored if either ``pkgs``
        or ``sources`` is used. Can only install from a software repository;
        to install a package file manually, use ``sources``.

    :param str version:
        Install a specific version of a package (ignored if ``sources`` is
        used). The string ``latest`` installs the newest available version.
        Wildcards (e.g. ``'1.2.34*'``) and comparison operators (``<``,
        ``<=``, ``>=``, ``>``) are supported on several platforms. For
        yum/dnf distros, versions with a nonzero epoch must include the
        epoch (e.g. ``2:7.4.160-1.el7``) unless ``ignore_epoch`` is set.

    :param bool refresh:
        Whether to update the package database before installing. If unset,
        a refresh is performed only if no other ``pkg`` state has already
        refreshed during the current Salt run.

    :param str fromrepo:
        Repository (or, for APT-based distros, release) to install from.

    :param bool skip_verify:
        Skip the GPG verification check for the package to be installed.

    :param bool skip_suggestions:
        Force strict package naming; disable lookup of package alternatives.

    :param list pkgs:
        A list of packages to install in a single package-manager call.
        Entries may be bare names or ``{name: version}`` mappings.

    :param list sources:
        A list of ``{name: URI-or-local-path}`` mappings identifying
        explicit package files to install.

    :param bool allow_updates:
        Allow the package to be updated outside Salt's control; a newer
        installed version still satisfies the state.

    :param pkg_verify:
        If ``True`` (or a list of options such as ``ignore_types`` /
        ``verify_options``), run ``pkg.verify`` on already-installed
        targets and reinstall packages whose files have been altered.

    :param bool normalize:
        Normalize the package name by removing the architecture when it
        differs from the OS architecture.

    :param bool ignore_epoch:
        Ignore the epoch when comparing package versions.

    :param bool reinstall:
        Force a reinstall of the targeted packages.

    :param bool update_holds:
        Temporarily unhold held packages so they can be updated
        (YUM/DNF & APT based systems).

    :param str bypass_file:
        If this file exists, skip the full (and comparatively lengthy)
        package-manager query and assume the package is already installed.
        Useful when running Salt at significant scale; a good choice is a
        config file deployed with the package, e.g. ``/etc/ntp.conf`` or
        ``/run/salt-minion.pid``.

    :param str bypass_file_contains:
        Only used together with ``bypass_file``: bypass only when the file
        also contains this string, providing a second layer of validation.
        Do not use ``{{ salt['pkg.version'](...) }}`` here, as rendering it
        would trigger the very package-manager lookup the bypass avoids.

    Additional platform-specific arguments (``hold``, ``cache_valid_time``,
    ``install_recommends``, ``only_upgrade``, ``resolve_capabilities``,
    ``report_reboot_exit_codes``, ...) are passed through to the pkg
    execution module via ``kwargs``.

    :return: A dictionary containing the state of the software installation
    :rtype dict:
    '''
    if not pkgs and isinstance(pkgs, list):
        return {'name': name,
                'changes': {},
                'result': True,
                'comment': 'No packages to install provided'}

    # If just a name (and optionally a version) is passed, just pack them into
    # the pkgs argument.
    if name and not any((pkgs, sources)):
        if version:
            pkgs = [{name: version}]
            version = None
        else:
            pkgs = [name]

    kwargs['saltenv'] = __env__
    refresh = salt.utils.pkg.check_refresh(__opts__, refresh)

    # Fast-path: skip the package-manager query entirely when the configured
    # bypass file (and optional marker string) is present.
    if bypass_file is not None and bypass_file_contains is not None:
        if os.path.isfile(bypass_file):
            with salt.utils.fopen(bypass_file) as bypass_file_open:
                if bypass_file_contains in bypass_file_open.read():
                    return {'name': name,
                            'changes': {},
                            'result': True,
                            'comment': 'pkg.installed was bypassed as {} was present in {}'.format(bypass_file_contains, bypass_file)}

    if bypass_file is not None and bypass_file_contains is None:
        if os.path.isfile(bypass_file):
            return {'name': name,
                    'changes': {},
                    'result': True,
                    'comment': 'pkg.installed was bypassed as bypass_file {} was present'.format(bypass_file)}

    # check if capabilities should be checked and modify the requested packages
    # accordingly.
    if pkgs:
        pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs)

    if not isinstance(pkg_verify, list):
        pkg_verify = pkg_verify is True
    if (pkg_verify or isinstance(pkg_verify, list)) \
            and 'pkg.verify' not in __salt__:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'pkg.verify not implemented'}

    if not isinstance(version, six.string_types) and version is not None:
        version = six.text_type(version)

    kwargs['allow_updates'] = allow_updates

    result = _find_install_targets(name, version, pkgs, sources,
                                   fromrepo=fromrepo,
                                   skip_suggestions=skip_suggestions,
                                   pkg_verify=pkg_verify,
                                   normalize=normalize,
                                   ignore_epoch=ignore_epoch,
                                   reinstall=reinstall,
                                   refresh=refresh,
                                   **kwargs)

    try:
        (desired, targets, to_unpurge, to_reinstall,
         altered_files, warnings, was_refreshed) = result
        if was_refreshed:
            refresh = False
    except ValueError:
        # _find_install_targets() found no targets or encountered an error

        # check that the hold function is available
        if 'pkg.hold' in __salt__ and 'hold' in kwargs:
            try:
                action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold'
                hold_ret = __salt__[action](
                    name=name, pkgs=pkgs, sources=sources
                )
            except (CommandExecutionError, SaltInvocationError) as exc:
                return {'name': name,
                        'changes': {},
                        'result': False,
                        'comment': six.text_type(exc)}

            if 'result' in hold_ret and not hold_ret['result']:
                return {'name': name,
                        'changes': {},
                        'result': False,
                        'comment': 'An error was encountered while '
                                   'holding/unholding package(s): {0}'
                                   .format(hold_ret['comment'])}
            else:
                modified_hold = [hold_ret[x] for x in hold_ret
                                 if hold_ret[x]['changes']]
                not_modified_hold = [hold_ret[x] for x in hold_ret
                                     if not hold_ret[x]['changes']
                                     and hold_ret[x]['result']]
                failed_hold = [hold_ret[x] for x in hold_ret
                               if not hold_ret[x]['result']]

                for i in modified_hold:
                    result['comment'] += '.\n{0}'.format(i['comment'])
                    result['result'] = i['result']
                    result['changes'][i['name']] = i['changes']

                for i in not_modified_hold:
                    result['comment'] += '.\n{0}'.format(i['comment'])
                    result['result'] = i['result']

                for i in failed_hold:
                    result['comment'] += '.\n{0}'.format(i['comment'])
                    result['result'] = i['result']
        return result

    if to_unpurge and 'lowpkg.unpurge' not in __salt__:
        ret = {'name': name,
               'changes': {},
               'result': False,
               'comment': 'lowpkg.unpurge not implemented'}
        if warnings:
            ret.setdefault('warnings', []).extend(warnings)
        return ret

    # Remove any targets not returned by _find_install_targets
    if pkgs:
        pkgs = [dict([(x, y)]) for x, y in six.iteritems(targets)]
        pkgs.extend([dict([(x, y)]) for x, y in six.iteritems(to_reinstall)])
    elif sources:
        oldsources = sources
        sources = [x for x in oldsources
                   if next(iter(list(x.keys()))) in targets]
        sources.extend([x for x in oldsources
                        if next(iter(list(x.keys()))) in to_reinstall])

    comment = []
    if __opts__['test']:
        if targets:
            if sources:
                summary = ', '.join(targets)
            else:
                summary = ', '.join([_get_desired_pkg(x, targets)
                                     for x in targets])
            comment.append('The following packages would be '
                           'installed/updated: {0}'.format(summary))
        if to_unpurge:
            comment.append(
                'The following packages would have their selection status '
                'changed from \'purge\' to \'install\': {0}'
                .format(', '.join(to_unpurge))
            )
        if to_reinstall:
            # Add a comment for each package in to_reinstall with its
            # pkg.verify output
            if reinstall:
                reinstall_targets = []
                for reinstall_pkg in to_reinstall:
                    if sources:
                        reinstall_targets.append(reinstall_pkg)
                    else:
                        reinstall_targets.append(
                            _get_desired_pkg(reinstall_pkg, to_reinstall)
                        )
                msg = 'The following packages would be reinstalled: '
                msg += ', '.join(reinstall_targets)
                comment.append(msg)
            else:
                for reinstall_pkg in to_reinstall:
                    if sources:
                        pkgstr = reinstall_pkg
                    else:
                        pkgstr = _get_desired_pkg(reinstall_pkg, to_reinstall)
                    comment.append(
                        'Package \'{0}\' would be reinstalled because the '
                        'following files have been altered:'.format(pkgstr)
                    )
                    comment.append(
                        _nested_output(altered_files[reinstall_pkg])
                    )
        ret = {'name': name,
               'changes': {},
               'result': None,
               'comment': '\n'.join(comment)}
        if warnings:
            ret.setdefault('warnings', []).extend(warnings)
        return ret

    changes = {'installed': {}}
    modified_hold = None
    not_modified_hold = None
    failed_hold = None
    if targets or to_reinstall:
        try:
            pkg_ret = __salt__['pkg.install'](name=None,
                                              refresh=refresh,
                                              version=version,
                                              fromrepo=fromrepo,
                                              skip_verify=skip_verify,
                                              pkgs=pkgs,
                                              sources=sources,
                                              reinstall=bool(to_reinstall),
                                              normalize=normalize,
                                              update_holds=update_holds,
                                              ignore_epoch=ignore_epoch,
                                              **kwargs)
        except CommandExecutionError as exc:
            ret = {'name': name, 'result': False}
            if exc.info:
                # Get information for state return from the exception.
                ret['changes'] = exc.info.get('changes', {})
                ret['comment'] = exc.strerror_without_changes
            else:
                ret['changes'] = {}
                ret['comment'] = ('An error was encountered while installing '
                                  'package(s): {0}'.format(exc))
            if warnings:
                ret.setdefault('warnings', []).extend(warnings)
            return ret

        if refresh:
            refresh = False

        if isinstance(pkg_ret, dict):
            changes['installed'].update(pkg_ret)
        elif isinstance(pkg_ret, six.string_types):
            comment.append(pkg_ret)
            # Code below will be looking for a dictionary. If this is a string
            # it means that there was an exception raised and that no packages
            # changed, so now that we have added this error to the comments we
            # set this to an empty dictionary so that the code below which
            # checks reinstall targets works.
            pkg_ret = {}

    if 'pkg.hold' in __salt__ and 'hold' in kwargs:
        try:
            action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold'
            hold_ret = __salt__[action](
                name=name, pkgs=desired
            )
        except (CommandExecutionError, SaltInvocationError) as exc:
            comment.append(six.text_type(exc))
            ret = {'name': name,
                   'changes': changes,
                   'result': False,
                   'comment': '\n'.join(comment)}
            if warnings:
                ret.setdefault('warnings', []).extend(warnings)
            return ret
        else:
            if 'result' in hold_ret and not hold_ret['result']:
                ret = {'name': name,
                       'changes': {},
                       'result': False,
                       'comment': 'An error was encountered while '
                                  'holding/unholding package(s): {0}'
                                  .format(hold_ret['comment'])}
                if warnings:
                    ret.setdefault('warnings', []).extend(warnings)
                return ret
            else:
                modified_hold = [hold_ret[x] for x in hold_ret
                                 if hold_ret[x]['changes']]
                not_modified_hold = [hold_ret[x] for x in hold_ret
                                     if not hold_ret[x]['changes']
                                     and hold_ret[x]['result']]
                failed_hold = [hold_ret[x] for x in hold_ret
                               if not hold_ret[x]['result']]

    if to_unpurge:
        changes['purge_desired'] = __salt__['lowpkg.unpurge'](*to_unpurge)

    # Analyze pkg.install results for packages in targets
    if sources:
        modified = [x for x in changes['installed'] if x in targets]
        not_modified = [x for x in desired
                        if x not in targets
                        and x not in to_reinstall]
        failed = [x for x in targets if x not in modified]
    else:
        if __grains__['os'] == 'FreeBSD':
            kwargs['with_origin'] = True
        new_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
        if kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__:
            new_caps = __salt__['pkg.list_provides'](**kwargs)
        else:
            new_caps = {}
        ok, failed = _verify_install(desired, new_pkgs,
                                     ignore_epoch=ignore_epoch,
                                     new_caps=new_caps)
        modified = [x for x in ok if x in targets]
        not_modified = [x for x in ok
                        if x not in targets
                        and x not in to_reinstall]
        failed = [x for x in failed if x in targets]

    # If there was nothing unpurged, just set the changes dict to the contents
    # of changes['installed'].
    if not changes.get('purge_desired'):
        changes = changes['installed']

    if modified:
        if sources:
            summary = ', '.join(modified)
        else:
            summary = ', '.join([_get_desired_pkg(x, desired)
                                 for x in modified])
        # FIX: gate on the package count (consistent with the already-installed
        # branch below), not on the length of the summary string.
        if len(modified) <= 20:
            comment.append('The following packages were installed/updated: '
                           '{0}'.format(summary))
        else:
            comment.append(
                '{0} targeted package{1} {2} installed/updated.'.format(
                    len(modified),
                    's' if len(modified) > 1 else '',
                    'were' if len(modified) > 1 else 'was'
                )
            )

    if modified_hold:
        for i in modified_hold:
            change_name = i['name']
            if change_name in changes:
                comment.append(i['comment'])
                if changes[change_name]['new']:
                    changes[change_name]['new'] += '\n'
                changes[change_name]['new'] += '{0}'.format(i['changes']['new'])
                if changes[change_name]['old']:
                    changes[change_name]['old'] += '\n'
                changes[change_name]['old'] += '{0}'.format(i['changes']['old'])
            else:
                comment.append(i['comment'])
                changes[change_name] = {}
                changes[change_name]['new'] = '{0}'.format(i['changes']['new'])

    # Any requested packages that were not targeted for install or reinstall
    if not_modified:
        if sources:
            summary = ', '.join(not_modified)
        else:
            summary = ', '.join([_get_desired_pkg(x, desired)
                                 for x in not_modified])
        if len(not_modified) <= 20:
            comment.append('The following packages were already installed: '
                           '{0}'.format(summary))
        else:
            comment.append(
                '{0} targeted package{1} {2} already installed'.format(
                    len(not_modified),
                    's' if len(not_modified) > 1 else '',
                    'were' if len(not_modified) > 1 else 'was'
                )
            )

    if not_modified_hold:
        for i in not_modified_hold:
            comment.append(i['comment'])

    result = True

    if failed:
        if sources:
            summary = ', '.join(failed)
        else:
            summary = ', '.join([_get_desired_pkg(x, desired)
                                 for x in failed])
        comment.insert(0, 'The following packages failed to '
                          'install/update: {0}'.format(summary))
        result = False

    if failed_hold:
        for i in failed_hold:
            comment.append(i['comment'])
        result = False

    # Get the ignore_types list if any from the pkg_verify argument
    if isinstance(pkg_verify, list) \
            and any(x.get('ignore_types') is not None
                    for x in pkg_verify
                    if isinstance(x, _OrderedDict)
                    and 'ignore_types' in x):
        ignore_types = next(x.get('ignore_types')
                            for x in pkg_verify
                            if 'ignore_types' in x)
    else:
        ignore_types = []

    # Get the verify_options list if any from the pkg_verify argument
    if isinstance(pkg_verify, list) \
            and any(x.get('verify_options') is not None
                    for x in pkg_verify
                    if isinstance(x, _OrderedDict)
                    and 'verify_options' in x):
        verify_options = next(x.get('verify_options')
                              for x in pkg_verify
                              if 'verify_options' in x)
    else:
        verify_options = []

    # Rerun pkg.verify for packages in to_reinstall to determine failed
    modified = []
    failed = []
    for reinstall_pkg in to_reinstall:
        if reinstall:
            if reinstall_pkg in pkg_ret:
                modified.append(reinstall_pkg)
            else:
                failed.append(reinstall_pkg)
        elif pkg_verify:
            # No need to wrap this in a try/except because we would already
            # have caught invalid arguments earlier.
            verify_result = __salt__['pkg.verify'](reinstall_pkg,
                                                   ignore_types=ignore_types,
                                                   verify_options=verify_options,
                                                   **kwargs)
            if verify_result:
                failed.append(reinstall_pkg)
                altered_files[reinstall_pkg] = verify_result
            else:
                modified.append(reinstall_pkg)

    if modified:
        # Add a comment for each package in modified with its pkg.verify output
        for modified_pkg in modified:
            if sources:
                pkgstr = modified_pkg
            else:
                pkgstr = _get_desired_pkg(modified_pkg, desired)
            msg = 'Package {0} was reinstalled.'.format(pkgstr)
            if modified_pkg in altered_files:
                msg += ' The following files were remediated:'
                comment.append(msg)
                comment.append(_nested_output(altered_files[modified_pkg]))
            else:
                comment.append(msg)

    if failed:
        # Add a comment for each package in failed with its pkg.verify output
        for failed_pkg in failed:
            if sources:
                pkgstr = failed_pkg
            else:
                pkgstr = _get_desired_pkg(failed_pkg, desired)
            msg = ('Reinstall was not successful for package {0}.'
                   .format(pkgstr))
            if failed_pkg in altered_files:
                msg += ' The following files could not be remediated:'
                comment.append(msg)
                comment.append(_nested_output(altered_files[failed_pkg]))
            else:
                comment.append(msg)
        result = False

    ret = {'name': name,
           'changes': changes,
           'result': result,
           'comment': '\n'.join(comment)}
    if warnings:
        ret.setdefault('warnings', []).extend(warnings)
    return ret
def downloaded(name,
               version=None,
               pkgs=None,
               fromrepo=None,
               ignore_epoch=None,
               **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that the package is downloaded, and that it is the correct version
    (if specified). No installation is performed.

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    :param str name:
        The package to download. Ignored if ``pkgs`` is used. Only packages
        from a software repository can be downloaded.

    :param str version:
        Download a specific version of a package. For yum/dnf distros, a
        version with a nonzero epoch must include the epoch unless
        ``ignore_epoch`` is used. Versions can also be given per package in
        the ``pkgs`` list (e.g. ``- salt-minion: 2015.8.5-1.el6``).

    :param list pkgs:
        A list of packages to download; entries may be names or
        ``{name: version}`` mappings.

    :param str fromrepo:
        Repository to download from.

    :param bool resolve_capabilities:
        Turn on resolving capabilities, allowing "provides" or alias names
        for packages.

        .. versionadded:: 2018.3.0

    Example:

    .. code-block:: yaml

        zsh:
          pkg.downloaded:
            - version: 5.0.5-4.63
            - fromrepo: "myrepository"
    '''
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}

    # Bail out early on platforms whose pkg module cannot report downloads
    if 'pkg.list_downloaded' not in __salt__:
        ret['result'] = False
        ret['comment'] = 'The pkg.downloaded state is not available on ' \
                         'this platform'
        return ret

    if not pkgs and isinstance(pkgs, list):
        ret['result'] = True
        ret['comment'] = 'No packages to download provided'
        return ret

    # A bare name (optionally with a version) is folded into the pkgs list
    # so only one code path needs to be handled below.
    if name and not pkgs:
        pkgs = [{name: version}] if version else [name]
        if version:
            version = None

    # It doesn't make sense here to received 'downloadonly' as kwargs
    # as we're explicitly passing 'downloadonly=True' to execution module.
    kwargs.pop('downloadonly', None)

    pkgs, _refresh = _resolve_capabilities(pkgs, **kwargs)

    # Only downloading not yet downloaded packages
    targets = _find_download_targets(name,
                                     version,
                                     pkgs,
                                     fromrepo=fromrepo,
                                     ignore_epoch=ignore_epoch,
                                     **kwargs)
    if isinstance(targets, dict):
        # A dict carrying 'result' is a fully-formed state return
        if 'result' in targets:
            return targets
    else:
        ret['result'] = False
        ret['comment'] = 'An error was encountered while checking targets: ' \
                         '{0}'.format(targets)
        return ret

    if __opts__['test']:
        ret['comment'] = 'The following packages would be ' \
                         'downloaded: {0}'.format(', '.join(targets))
        return ret

    try:
        install_ret = __salt__['pkg.install'](name=name,
                                              pkgs=pkgs,
                                              version=version,
                                              downloadonly=True,
                                              fromrepo=fromrepo,
                                              ignore_epoch=ignore_epoch,
                                              **kwargs)
        ret['result'] = True
        ret['changes'].update(install_ret)
    except CommandExecutionError as exc:
        ret = {'name': name, 'result': False}
        if exc.info:
            # Get information for state return from the exception.
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = 'An error was encountered while downloading ' \
                             'package(s): {0}'.format(exc)
        return ret

    downloaded_pkgs = __salt__['pkg.list_downloaded'](**kwargs)
    _, failed = _verify_install(targets, downloaded_pkgs,
                                ignore_epoch=ignore_epoch)

    if failed:
        summary = ', '.join([_get_desired_pkg(x, targets) for x in failed])
        ret['result'] = False
        ret['comment'] = 'The following packages failed to ' \
                         'download: {0}'.format(summary)

    if not ret['changes'] and not ret['comment']:
        ret['result'] = True
        ret['comment'] = 'Packages are already downloaded: ' \
                         '{0}'.format(', '.join(targets))

    return ret
code-block:: yaml issue-foo-fixed: pkg.patch_installed: - advisory_ids: - SUSE-SLE-SERVER-12-SP2-2017-185 - SUSE-SLE-SERVER-12-SP2-2017-150 - SUSE-SLE-SERVER-12-SP2-2017-120 ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} if 'pkg.list_patches' not in __salt__: ret['result'] = False ret['comment'] = 'The pkg.patch_installed state is not available on ' \ 'this platform' return ret if not advisory_ids and isinstance(advisory_ids, list): ret['result'] = True ret['comment'] = 'No advisory ids provided' return ret # Only downloading not yet downloaded packages targets = _find_advisory_targets(name, advisory_ids, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, list): ret['result'] = False ret['comment'] = 'An error was encountered while checking targets: ' \ '{0}'.format(targets) return ret if __opts__['test']: summary = ', '.join(targets) ret['comment'] = 'The following advisory patches would be ' \ 'downloaded: {0}'.format(summary) return ret try: pkg_ret = __salt__['pkg.install'](name=name, advisory_ids=advisory_ids, downloadonly=downloadonly, **kwargs) ret['result'] = True ret['changes'].update(pkg_ret) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while downloading ' 'package(s): {0}'.format(exc)) return ret if not ret['changes'] and not ret['comment']: status = 'downloaded' if downloadonly else 'installed' ret['result'] = True ret['comment'] = ('Advisory patch is not needed or related packages ' 'are already {0}'.format(status)) return ret def patch_downloaded(name, advisory_ids=None, **kwargs): ''' .. versionadded:: 2017.7.0 Ensure that packages related to certain advisory ids are downloaded. 
Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` CLI Example: .. code-block:: yaml preparing-to-fix-issues: pkg.patch_downloaded: - advisory_ids: - SUSE-SLE-SERVER-12-SP2-2017-185 - SUSE-SLE-SERVER-12-SP2-2017-150 - SUSE-SLE-SERVER-12-SP2-2017-120 ''' if 'pkg.list_patches' not in __salt__: return {'name': name, 'result': False, 'changes': {}, 'comment': 'The pkg.patch_downloaded state is not available on ' 'this platform'} # It doesn't make sense here to received 'downloadonly' as kwargs # as we're explicitly passing 'downloadonly=True' to execution module. if 'downloadonly' in kwargs: del kwargs['downloadonly'] return patch_installed(name=name, advisory_ids=advisory_ids, downloadonly=True, **kwargs) def latest( name, refresh=None, fromrepo=None, skip_verify=False, pkgs=None, watch_flags=True, **kwargs): ''' Ensure that the named package is installed and the latest available package. If the package can be updated, this state function will update the package. Generally it is better for the :mod:`installed <salt.states.pkg.installed>` function to be used, as :mod:`latest <salt.states.pkg.latest>` will update the package whenever a new package is available. name The name of the package to maintain at the latest available version. This parameter is ignored if "pkgs" is used. fromrepo Specify a repository from which to install skip_verify Skip the GPG verification check for the package to be installed refresh This parameter controls whether or not the package repo database is updated prior to checking for the latest available version of the requested packages. If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before checking for the latest available version of the requested packages. If ``False``, the package database will *not* be refreshed before checking. 
If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.latest: - refresh: True - cache_valid_time: 300 In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 Multiple Package Installation Options: (Not yet supported for: FreeBSD, OpenBSD, MacOS, and Solaris pkgutil) pkgs A list of packages to maintain at the latest available version. .. code-block:: yaml mypkgs: pkg.latest: - pkgs: - foo - bar - baz install_recommends Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - install_recommends: False only_upgrade Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. 
report_reboot_exit_codes If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. code-block:: yaml ms vcpp installed: pkg.latest: - name: ms-vcpp - report_reboot_exit_codes: False ''' refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if kwargs.get('sources'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'The "sources" parameter is not supported.'} elif pkgs: desired_pkgs = list(_repack_pkgs(pkgs).keys()) if not desired_pkgs: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted "pkgs" parameter. See ' 'minion log.'} else: if not pkgs and isinstance(pkgs, list): return { 'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided' } else: desired_pkgs = [name] kwargs['saltenv'] = __env__ # check if capabilities should be checked and modify the requested packages # accordingly. 
desired_pkgs, refresh = _resolve_capabilities(desired_pkgs, refresh=refresh, **kwargs) try: avail = __salt__['pkg.latest_version'](*desired_pkgs, fromrepo=fromrepo, refresh=refresh, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking the ' 'newest available version of package(s): {0}' .format(exc)} try: cur = __salt__['pkg.version'](*desired_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': exc.strerror} # Repack the cur/avail data if only a single package is being checked if isinstance(cur, six.string_types): cur = {desired_pkgs[0]: cur} if isinstance(avail, six.string_types): avail = {desired_pkgs[0]: avail} targets = {} problems = [] for pkg in desired_pkgs: if not avail.get(pkg): # Package either a) is up-to-date, or b) does not exist if not cur.get(pkg): # Package does not exist msg = 'No information found for \'{0}\'.'.format(pkg) log.error(msg) problems.append(msg) elif watch_flags \ and __grains__.get('os_family') == 'Gentoo' \ and __salt__['portage_config.is_changed_uses'](pkg): # Package is up-to-date, but Gentoo USE flags are changing so # we need to add it to the targets targets[pkg] = cur[pkg] else: # Package either a) is not installed, or b) is installed and has an # upgrade available targets[pkg] = avail[pkg] if problems: return { 'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems) } if targets: # Find up-to-date packages if not pkgs: # There couldn't have been any up-to-date packages if this state # only targeted a single package and is being allowed to proceed to # the install step. 
up_to_date = [] else: up_to_date = [x for x in pkgs if x not in targets] if __opts__['test']: comments = [] comments.append( 'The following packages would be installed/upgraded: ' + ', '.join(sorted(targets)) ) if up_to_date: up_to_date_count = len(up_to_date) if up_to_date_count <= 10: comments.append( 'The following packages are already up-to-date: ' + ', '.join( ['{0} ({1})'.format(x, cur[x]) for x in sorted(up_to_date)] ) ) else: comments.append( '{0} packages are already up-to-date' .format(up_to_date_count) ) return {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comments)} if salt.utils.platform.is_windows(): # pkg.install execution module on windows ensures the software # package is installed when no version is specified, it does not # upgrade the software to the latest. This is per the design. # Build updated list of pkgs *with verion number*, exclude # non-targeted ones targeted_pkgs = [{x: targets[x]} for x in targets] else: # Build updated list of pkgs to exclude non-targeted ones targeted_pkgs = list(targets) # No need to refresh, if a refresh was necessary it would have been # performed above when pkg.latest_version was run. 
try: changes = __salt__['pkg.install'](name=None, refresh=False, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=targeted_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while installing ' 'package(s): {0}'.format(exc)} if changes: # Find failed and successful updates failed = [x for x in targets if not changes.get(x) or changes[x].get('new') != targets[x] and targets[x] != 'latest'] successful = [x for x in targets if x not in failed] comments = [] if failed: msg = 'The following packages failed to update: ' \ '{0}'.format(', '.join(sorted(failed))) comments.append(msg) if successful: msg = 'The following packages were successfully ' \ 'installed/upgraded: ' \ '{0}'.format(', '.join(sorted(successful))) comments.append(msg) if up_to_date: if len(up_to_date) <= 10: msg = 'The following packages were already up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: msg = '{0} packages were already up-to-date '.format( len(up_to_date)) comments.append(msg) return {'name': name, 'changes': changes, 'result': False if failed else True, 'comment': ' '.join(comments)} else: if len(targets) > 10: comment = ('{0} targeted packages failed to update. ' 'See debug log for details.'.format(len(targets))) elif len(targets) > 1: comment = ('The following targeted packages failed to update. ' 'See debug log for details: ({0}).' 
.format(', '.join(sorted(targets)))) else: comment = 'Package {0} failed to ' \ 'update.'.format(next(iter(list(targets.keys())))) if up_to_date: if len(up_to_date) <= 10: comment += ' The following packages were already ' \ 'up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: comment += '{0} packages were already ' \ 'up-to-date'.format(len(up_to_date)) return {'name': name, 'changes': changes, 'result': False, 'comment': comment} else: if len(desired_pkgs) > 10: comment = 'All {0} packages are up-to-date.'.format( len(desired_pkgs)) elif len(desired_pkgs) > 1: comment = 'All packages are up-to-date ' \ '({0}).'.format(', '.join(sorted(desired_pkgs))) else: comment = 'Package {0} is already ' \ 'up-to-date'.format(desired_pkgs[0]) return {'name': name, 'changes': {}, 'result': True, 'comment': comment} def _uninstall( action='remove', name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Common function for package removal ''' if action not in ('remove', 'purge'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalid action \'{0}\'. 
' 'This is probably a bug.'.format(action)} try: pkg_params = __salt__['pkg_resource.parse_targets']( name, pkgs, normalize=normalize)[0] except MinionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while parsing targets: ' '{0}'.format(exc)} targets = _find_remove_targets(name, version, pkgs, normalize, ignore_epoch=ignore_epoch, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, list): return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking targets: ' '{0}'.format(targets)} if action == 'purge': old_removed = __salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) targets.extend([x for x in pkg_params if x in old_removed]) targets.sort() if not targets: return {'name': name, 'changes': {}, 'result': True, 'comment': 'None of the targeted packages are installed' '{0}'.format(' or partially installed' if action == 'purge' else '')} if __opts__['test']: return {'name': name, 'changes': {}, 'result': None, 'comment': 'The following packages will be {0}d: ' '{1}.'.format(action, ', '.join(targets))} changes = __salt__['pkg.{0}'.format(action)](name, pkgs=pkgs, version=version, **kwargs) new = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) failed = [] for x in pkg_params: if __grains__['os_family'] in ['Suse', 'RedHat']: # Check if the package version set to be removed is actually removed: if x in new and not pkg_params[x]: failed.append(x) elif x in new and pkg_params[x] in new[x]: failed.append(x + "-" + pkg_params[x]) elif x in new: failed.append(x) if action == 'purge': new_removed = __salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) failed.extend([x for x in pkg_params if x in new_removed]) failed.sort() if failed: return {'name': name, 'changes': changes, 'result': False, 'comment': 'The following packages failed to {0}: ' '{1}.'.format(action, ', 
'.join(failed))} comments = [] not_installed = sorted([x for x in pkg_params if x not in targets]) if not_installed: comments.append('The following packages were not installed: ' '{0}'.format(', '.join(not_installed))) comments.append('The following packages were {0}d: ' '{1}.'.format(action, ', '.join(targets))) else: comments.append('All targeted packages were {0}d.'.format(action)) return {'name': name, 'changes': changes, 'result': True, 'comment': ' '.join(comments)} def removed(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.remove`` if necessary to remove the package. name The name of the package to be removed. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.removed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to remove. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='remove', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while removing ' 'package(s): {0}'.format(exc)) return ret def purged(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.purge`` if necessary to purge the package. 
All configuration files are also removed. name The name of the package to be purged. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. 
code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.purged: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to purge. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='purge', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while purging ' 'package(s): {0}'.format(exc)) return ret def uptodate(name, refresh=False, pkgs=None, **kwargs): ''' .. versionadded:: 2014.7.0 .. versionchanged:: 2018.3.0 Added support for the ``pkgin`` provider. Verify that the system is completely up to date. name The name has no functional value and is only used as a tracking reference refresh refresh the package database before checking for new upgrades pkgs list of packages to upgrade :param str cache_valid_time: This parameter sets the value in seconds after which cache marked as invalid, and cache update is necessary. This overwrite ``refresh`` parameter default behavior. In this case cache_valid_time is set, refresh will not take place for amount in seconds since last ``apt-get update`` executed on the system. .. 
note:: This parameter available only on Debian based distributions, and have no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 kwargs Any keyword arguments to pass through to ``pkg.upgrade``. .. versionadded:: 2015.5.0 ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'Failed to update'} if 'pkg.list_upgrades' not in __salt__: ret['comment'] = 'State pkg.uptodate is not available' return ret # emerge --update doesn't appear to support repo notation if 'fromrepo' in kwargs and __grains__['os_family'] == 'Gentoo': ret['comment'] = '\'fromrepo\' argument not supported on this platform' return ret if isinstance(refresh, bool): pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) try: packages = __salt__['pkg.list_upgrades'](refresh=refresh, **kwargs) expected = {pkgname: {'new': pkgver, 'old': __salt__['pkg.version'](pkgname, **kwargs)} for pkgname, pkgver in six.iteritems(packages)} if isinstance(pkgs, list): packages = [pkg for pkg in packages if pkg in pkgs] expected = {pkgname: pkgver for pkgname, pkgver in six.iteritems(expected) if pkgname in pkgs} except Exception as exc: ret['comment'] = six.text_type(exc) return ret else: ret['comment'] = 'refresh must be either True or False' return ret if not packages: ret['comment'] = 'System is already up-to-date' ret['result'] = True return ret elif __opts__['test']: ret['comment'] = 'System update will be performed' ret['changes'] = expected ret['result'] = None return ret try: ret['changes'] = __salt__['pkg.upgrade'](refresh=refresh, pkgs=pkgs, **kwargs) except CommandExecutionError as exc: if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while updating ' 'packages: {0}'.format(exc)) return ret # If a package list was provided, ensure those packages were updated missing = [] if isinstance(pkgs, list): missing = [pkg for pkg in six.iterkeys(expected) if pkg not in ret['changes']] if missing: ret['comment'] = 'The following package(s) failed to update: {0}'.format(', '.join(missing)) ret['result'] = False else: ret['comment'] = 'Upgrade ran successfully' ret['result'] = True return ret def group_installed(name, skip=None, include=None, **kwargs): ''' .. versionadded:: 2015.8.0 .. versionchanged:: 2016.11.0 Added support in :mod:`pacman <salt.modules.pacman>` Ensure that an entire package group is installed. This state is currently only supported for the :mod:`yum <salt.modules.yumpkg>` and :mod:`pacman <salt.modules.pacman>` package managers. skip Packages that would normally be installed by the package group ("default" packages), which should not be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - skip: - piranha include Packages which are included in a group, which would not normally be installed by a ``yum groupinstall`` ("optional" packages). Note that this will not enforce group membership; if you include packages which are not members of the specified groups, they will still be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - include: - haproxy .. versionchanged:: 2016.3.0 This option can no longer be passed as a comma-separated list, it must now be passed as a list (as shown in the above example). .. note:: Because this is essentially a wrapper around :py:func:`pkg.install <salt.modules.yumpkg.install>`, any argument which can be passed to pkg.install may also be included here, and it will be passed on to the call to :py:func:`pkg.install <salt.modules.yumpkg.install>`. 
''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} if 'pkg.group_diff' not in __salt__: ret['comment'] = 'pkg.group_install not available for this platform' return ret if skip is None: skip = [] else: if not isinstance(skip, list): ret['comment'] = 'skip must be formatted as a list' return ret for idx, item in enumerate(skip): if not isinstance(item, six.string_types): skip[idx] = six.text_type(item) if include is None: include = [] else: if not isinstance(include, list): ret['comment'] = 'include must be formatted as a list' return ret for idx, item in enumerate(include): if not isinstance(item, six.string_types): include[idx] = six.text_type(item) try: diff = __salt__['pkg.group_diff'](name) except CommandExecutionError as err: ret['comment'] = ('An error was encountered while installing/updating ' 'group \'{0}\': {1}.'.format(name, err)) return ret mandatory = diff['mandatory']['installed'] + \ diff['mandatory']['not installed'] invalid_skip = [x for x in mandatory if x in skip] if invalid_skip: ret['comment'] = ( 'The following mandatory packages cannot be skipped: {0}' .format(', '.join(invalid_skip)) ) return ret targets = diff['mandatory']['not installed'] targets.extend([x for x in diff['default']['not installed'] if x not in skip]) targets.extend(include) if not targets: ret['result'] = True ret['comment'] = 'Group \'{0}\' is already installed'.format(name) return ret partially_installed = diff['mandatory']['installed'] \ or diff['default']['installed'] \ or diff['optional']['installed'] if __opts__['test']: ret['result'] = None if partially_installed: ret['comment'] = ( 'Group \'{0}\' is partially installed and will be updated' .format(name) ) else: ret['comment'] = 'Group \'{0}\' will be installed'.format(name) return ret try: ret['changes'] = __salt__['pkg.install'](pkgs=targets, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while ' 'installing/updating group \'{0}\': {1}' .format(name, exc)) return ret failed = [x for x in targets if x not in __salt__['pkg.list_pkgs'](**kwargs)] if failed: ret['comment'] = ( 'Failed to install the following packages: {0}' .format(', '.join(failed)) ) return ret ret['result'] = True ret['comment'] = 'Group \'{0}\' was {1}'.format( name, 'updated' if partially_installed else 'installed' ) return ret def mod_init(low): ''' Set a flag to tell the install functions to refresh the package database. This ensures that the package database is refreshed only once during a state run significantly improving the speed of package management during a state run. It sets a flag for a number of reasons, primarily due to timeline logic. When originally setting up the mod_init for pkg a number of corner cases arose with different package managers and how they refresh package data. It also runs the "ex_mod_init" from the package manager module that is currently loaded. The "ex_mod_init" is expected to work as a normal "mod_init" function. .. 
seealso:: :py:func:`salt.modules.ebuild.ex_mod_init` ''' ret = True if 'pkg.ex_mod_init' in __salt__: ret = __salt__['pkg.ex_mod_init'](low) if low['fun'] == 'installed' or low['fun'] == 'latest': salt.utils.pkg.write_rtag(__opts__) return ret return False def mod_aggregate(low, chunks, running): ''' The mod_aggregate function which looks up all packages in the available low chunks and merges them into a single pkgs ref in the present low data ''' pkgs = [] pkg_type = None agg_enabled = [ 'installed', 'latest', 'removed', 'purged', ] if low.get('fun') not in agg_enabled: return low for chunk in chunks: tag = __utils__['state.gen_tag'](chunk) if tag in running: # Already ran the pkg state, skip aggregation continue if chunk.get('state') == 'pkg': if '__agg__' in chunk: continue # Check for the same function if chunk.get('fun') != low.get('fun'): continue # Check for the same repo if chunk.get('fromrepo') != low.get('fromrepo'): continue # Check first if 'sources' was passed so we don't aggregate pkgs # and sources together. if 'sources' in chunk: if pkg_type is None: pkg_type = 'sources' if pkg_type == 'sources': pkgs.extend(chunk['sources']) chunk['__agg__'] = True else: if pkg_type is None: pkg_type = 'pkgs' if pkg_type == 'pkgs': # Pull out the pkg names! if 'pkgs' in chunk: pkgs.extend(chunk['pkgs']) chunk['__agg__'] = True elif 'name' in chunk: version = chunk.pop('version', None) if version is not None: pkgs.append({chunk['name']: version}) else: pkgs.append(chunk['name']) chunk['__agg__'] = True if pkg_type is not None and pkgs: if pkg_type in low: low[pkg_type].extend(pkgs) else: low[pkg_type] = pkgs return low def mod_watch(name, **kwargs): ''' Install/reinstall a package based on a watch requisite .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered. 
''' sfun = kwargs.pop('sfun', None) mapfun = {'purged': purged, 'latest': latest, 'removed': removed, 'installed': installed} if sfun in mapfun: return mapfun[sfun](name, **kwargs) return {'name': name, 'changes': {}, 'comment': 'pkg.{0} does not work with the watch requisite'.format(sfun), 'result': False}
saltstack/salt
salt/states/pkg.py
_nested_output
python
def _nested_output(obj): ''' Serialize obj and format for output ''' nested.__opts__ = __opts__ ret = nested.output(obj).rstrip() return ret
Serialize obj and format for output
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pkg.py#L920-L926
null
# -*- coding: utf-8 -*- ''' Installation of packages using OS package managers such as yum or apt-get ========================================================================= .. note:: On minions running systemd>=205, as of version 2015.8.12, 2016.3.3, and 2016.11.0, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing the package manager commands spawned by Salt, when Salt updates itself (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.use_scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Salt can manage software packages via the pkg state module, packages can be set up to be installed, latest, removed and purged. Package management declarations are typically rather simple: .. code-block:: yaml vim: pkg.installed A more involved example involves pulling from a custom repository. .. code-block:: yaml base: pkgrepo.managed: - humanname: Logstash PPA - name: ppa:wolfnet/logstash - dist: precise - file: /etc/apt/sources.list.d/logstash.list - keyid: 28B04E4A - keyserver: keyserver.ubuntu.com logstash: pkg.installed: - fromrepo: ppa:wolfnet/logstash Multiple packages can also be installed with the use of the pkgs state module .. code-block:: yaml dotdeb.repo: pkgrepo.managed: - humanname: Dotdeb - name: deb http://packages.dotdeb.org wheezy-php55 all - dist: wheezy-php55 - file: /etc/apt/sources.list.d/dotbeb.list - keyid: 89DF5277 - keyserver: keys.gnupg.net - refresh_db: true php.packages: pkg.installed: - fromrepo: wheezy-php55 - pkgs: - php5-fpm - php5-cli - php5-curl .. 
warning:: Package names are currently case-sensitive. If the minion is using a package manager which is not case-sensitive (such as :mod:`pkgng <salt.modules.pkgng>`), then this state will fail if the proper case is not used. This will be addressed in a future release of Salt. ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import fnmatch import logging import os import re # Import Salt libs import salt.utils.pkg import salt.utils.platform import salt.utils.versions from salt.output import nested from salt.utils.functools import namespaced_function as _namespaced_function from salt.utils.odict import OrderedDict as _OrderedDict from salt.exceptions import ( CommandExecutionError, MinionError, SaltInvocationError ) from salt.modules.pkg_resource import _repack_pkgs # Import 3rd-party libs from salt.ext import six # pylint: disable=invalid-name _repack_pkgs = _namespaced_function(_repack_pkgs, globals()) if salt.utils.platform.is_windows(): # pylint: disable=import-error,no-name-in-module,unused-import from salt.ext.six.moves.urllib.parse import urlparse as _urlparse from salt.exceptions import SaltRenderError import collections import datetime import errno import time from functools import cmp_to_key # pylint: disable=import-error # pylint: enable=unused-import from salt.modules.win_pkg import _get_package_info from salt.modules.win_pkg import get_repo_data from salt.modules.win_pkg import _get_repo_details from salt.modules.win_pkg import _refresh_db_conditional from salt.modules.win_pkg import refresh_db from salt.modules.win_pkg import genrepo from salt.modules.win_pkg import _repo_process_pkg_sls from salt.modules.win_pkg import _get_latest_pkg_version from salt.modules.win_pkg import _reverse_cmp_pkg_versions _get_package_info = _namespaced_function(_get_package_info, globals()) get_repo_data = _namespaced_function(get_repo_data, globals()) _get_repo_details = \ _namespaced_function(_get_repo_details, globals()) 
_refresh_db_conditional = \ _namespaced_function(_refresh_db_conditional, globals()) refresh_db = _namespaced_function(refresh_db, globals()) genrepo = _namespaced_function(genrepo, globals()) _repo_process_pkg_sls = \ _namespaced_function(_repo_process_pkg_sls, globals()) _get_latest_pkg_version = \ _namespaced_function(_get_latest_pkg_version, globals()) _reverse_cmp_pkg_versions = \ _namespaced_function(_reverse_cmp_pkg_versions, globals()) # The following imports are used by the namespaced win_pkg funcs # and need to be included in their globals. # pylint: disable=import-error,unused-import from salt.utils.versions import LooseVersion # pylint: enable=import-error,unused-import # pylint: enable=invalid-name log = logging.getLogger(__name__) def __virtual__(): ''' Only make these states available if a pkg provider has been detected or assigned for this minion ''' return 'pkg.install' in __salt__ def _get_comparison_spec(pkgver): ''' Return a tuple containing the comparison operator and the version. If no comparison operator was passed, the comparison is assumed to be an "equals" comparison, and "==" will be the operator returned. ''' oper, verstr = salt.utils.pkg.split_comparison(pkgver.strip()) if oper in ('=', ''): oper = '==' return oper, verstr def _parse_version_string(version_conditions_string): ''' Returns a list of two-tuples containing (operator, version). ''' result = [] version_conditions_string = version_conditions_string.strip() if not version_conditions_string: return result for version_condition in version_conditions_string.split(','): operator_and_version = _get_comparison_spec(version_condition) result.append(operator_and_version) return result def _fulfills_version_string(installed_versions, version_conditions_string, ignore_epoch=False, allow_updates=False): ''' Returns True if any of the installed versions match the specified version conditions, otherwise returns False. 
installed_versions The installed versions version_conditions_string The string containing all version conditions. E.G. 1.2.3-4 >=1.2.3-4 >=1.2.3-4, <2.3.4-5 >=1.2.3-4, <2.3.4-5, !=1.2.4-1 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. allow_updates : False Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. (Only applicable if only one strict version condition is specified E.G. version: 2.0.6~ubuntu3) ''' version_conditions = _parse_version_string(version_conditions_string) for installed_version in installed_versions: fullfills_all = True for operator, version_string in version_conditions: if allow_updates and len(version_conditions) == 1 and operator == '==': operator = '>=' fullfills_all = fullfills_all and _fulfills_version_spec([installed_version], operator, version_string, ignore_epoch=ignore_epoch) if fullfills_all: return True return False def _fulfills_version_spec(versions, oper, desired_version, ignore_epoch=False): ''' Returns True if any of the installed versions match the specified version, otherwise returns False ''' cmp_func = __salt__.get('pkg.version_cmp') # stripping "with_origin" dict wrapper if salt.utils.platform.is_freebsd(): if isinstance(versions, dict) and 'version' in versions: versions = versions['version'] for ver in versions: if (oper == '==' and fnmatch.fnmatch(ver, desired_version)) \ or salt.utils.versions.compare(ver1=ver, oper=oper, ver2=desired_version, cmp_func=cmp_func, ignore_epoch=ignore_epoch): return True return False def _find_unpurge_targets(desired, **kwargs): ''' Find packages which are marked to be purged but can't yet be removed because they are dependencies for 
other installed packages. These are the packages which will need to be 'unpurged' because they are part of pkg.installed states. This really just applies to Debian-based Linuxes. ''' return [ x for x in desired if x in __salt__['pkg.list_pkgs'](purge_desired=True, **kwargs) ] def _find_download_targets(name=None, version=None, pkgs=None, normalize=True, skip_suggestions=False, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.downloaded and discover what packages need to be downloaded. Return a dict of packages to download. ''' cur_pkgs = __salt__['pkg.list_downloaded'](**kwargs) if pkgs: to_download = _repack_pkgs(pkgs, normalize=normalize) if not to_download: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. See ' 'minion log.'} else: if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_download = {_normalize_name(name): version} else: to_download = {name: version} cver = cur_pkgs.get(name, {}) if name in to_download: # Package already downloaded, no need to download again if cver and version in cver: return {'name': name, 'changes': {}, 'result': True, 'comment': 'Version {0} of package \'{1}\' is already ' 'downloaded'.format(version, name)} # if cver is not an empty string, the package is already downloaded elif cver and version is None: # The package is downloaded return {'name': name, 'changes': {}, 'result': True, 'comment': 'Package {0} is already ' 'downloaded'.format(name)} version_spec = False if not skip_suggestions: try: problems = _preflight_check(to_download, **kwargs) except CommandExecutionError: pass else: comments = [] if problems.get('no_suggest'): comments.append( 'The following package(s) were not found, and no ' 'possible matches were found in the package db: ' '{0}'.format( ', '.join(sorted(problems['no_suggest'])) ) ) if problems.get('suggest'): for pkgname, suggestions in \ six.iteritems(problems['suggest']): 
comments.append( 'Package \'{0}\' not found (possible matches: ' '{1})'.format(pkgname, ', '.join(suggestions)) ) if comments: if len(comments) > 1: comments.append('') return {'name': name, 'changes': {}, 'result': False, 'comment': '. '.join(comments).rstrip()} # Find out which packages will be targeted in the call to pkg.download # Check current downloaded versions against specified versions targets = {} problems = [] for pkgname, pkgver in six.iteritems(to_download): cver = cur_pkgs.get(pkgname, {}) # Package not yet downloaded, so add to targets if not cver: targets[pkgname] = pkgver continue # No version specified but package is already downloaded elif cver and not pkgver: continue version_spec = True try: if not _fulfills_version_string(cver.keys(), pkgver, ignore_epoch=ignore_epoch): targets[pkgname] = pkgver except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already downloaded msg = ( 'All specified packages{0} are already downloaded' .format(' (matching specified versions)' if version_spec else '') ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_advisory_targets(name=None, advisory_ids=None, **kwargs): ''' Inspect the arguments to pkg.patch_installed and discover what advisory patches need to be installed. Return a dict of advisory patches to install. 
''' cur_patches = __salt__['pkg.list_installed_patches'](**kwargs) if advisory_ids: to_download = advisory_ids else: to_download = [name] if cur_patches.get(name, {}): # Advisory patch already installed, no need to install it again return {'name': name, 'changes': {}, 'result': True, 'comment': 'Advisory patch {0} is already ' 'installed'.format(name)} # Find out which advisory patches will be targeted in the call to pkg.install targets = [] for patch_name in to_download: cver = cur_patches.get(patch_name, {}) # Advisory patch not yet installed, so add to targets if not cver: targets.append(patch_name) continue if not targets: # All specified packages are already downloaded msg = ('All specified advisory patches are already installed') return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_remove_targets(name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.removed and discover what packages need to be removed. Return a dict of packages to remove. ''' if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if pkgs: to_remove = _repack_pkgs(pkgs, normalize=normalize) if not to_remove: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. 
See ' 'minion log.'} else: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_remove = {_normalize_name(name): version} version_spec = False # Find out which packages will be targeted in the call to pkg.remove # Check current versions against specified versions targets = [] problems = [] for pkgname, pkgver in six.iteritems(to_remove): # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names origin = bool(re.search('/', pkgname)) if __grains__['os'] == 'FreeBSD' and origin: cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == pkgname] else: cver = cur_pkgs.get(pkgname, []) # Package not installed, no need to remove if not cver: continue # No version specified and pkg is installed elif __salt__['pkg_resource.version_clean'](pkgver) is None: targets.append(pkgname) continue version_spec = True try: if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch): targets.append(pkgname) else: log.debug( 'Current version (%s) did not match desired version ' 'specification (%s), will not remove', cver, pkgver ) except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already absent msg = 'All specified packages{0} are already absent'.format( ' (matching specified versions)' if version_spec else '' ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_install_targets(name=None, version=None, pkgs=None, sources=None, skip_suggestions=False, pkg_verify=False, normalize=True, ignore_epoch=False, reinstall=False, refresh=False, **kwargs): ''' Inspect the arguments to pkg.installed and discover what packages need to be installed. 
Return a dict of desired packages ''' was_refreshed = False if all((pkgs, sources)): return {'name': name, 'changes': {}, 'result': False, 'comment': 'Only one of "pkgs" and "sources" is permitted.'} # dict for packages that fail pkg.verify and their altered files altered_files = {} # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True if salt.utils.platform.is_windows(): # Windows requires a refresh to establish a pkg db if refresh=True, so # add it to the kwargs. 
kwargs['refresh'] = refresh resolve_capabilities = kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__ try: cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) cur_prov = resolve_capabilities and __salt__['pkg.list_provides'](**kwargs) or dict() except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': exc.strerror} if salt.utils.platform.is_windows() and kwargs.pop('refresh', False): # We already refreshed when we called pkg.list_pkgs was_refreshed = True refresh = False if any((pkgs, sources)): if pkgs: desired = _repack_pkgs(pkgs, normalize=normalize) elif sources: desired = __salt__['pkg_resource.pack_sources']( sources, normalize=normalize, ) if not desired: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted \'{0}\' parameter. See ' 'minion log.'.format('pkgs' if pkgs else 'sources')} to_unpurge = _find_unpurge_targets(desired, **kwargs) else: if salt.utils.platform.is_windows(): pkginfo = _get_package_info(name, saltenv=kwargs['saltenv']) if not pkginfo: return {'name': name, 'changes': {}, 'result': False, 'comment': 'Package {0} not found in the ' 'repository.'.format(name)} if version is None: version = _get_latest_pkg_version(pkginfo) if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) desired = {_normalize_name(name): version} else: desired = {name: version} to_unpurge = _find_unpurge_targets(desired, **kwargs) # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names origin = bool(re.search('/', name)) if __grains__['os'] == 'FreeBSD' and origin: cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == name] else: cver = cur_pkgs.get(name, []) if name not in to_unpurge: if version and version in cver \ and not reinstall \ and not pkg_verify: # The package is installed and is the correct version return {'name': name, 'changes': {}, 'result': True, 
'comment': 'Version {0} of package \'{1}\' is already ' 'installed'.format(version, name)} # if cver is not an empty string, the package is already installed elif cver and version is None \ and not reinstall \ and not pkg_verify: # The package is installed return {'name': name, 'changes': {}, 'result': True, 'comment': 'Package {0} is already ' 'installed'.format(name)} version_spec = False if not sources: # Check for alternate package names if strict processing is not # enforced. Takes extra time. Disable for improved performance if not skip_suggestions: # Perform platform-specific pre-flight checks not_installed = dict([ (name, version) for name, version in desired.items() if not (name in cur_pkgs and (version is None or _fulfills_version_string(cur_pkgs[name], version))) ]) if not_installed: try: problems = _preflight_check(not_installed, **kwargs) except CommandExecutionError: pass else: comments = [] if problems.get('no_suggest'): comments.append( 'The following package(s) were not found, and no ' 'possible matches were found in the package db: ' '{0}'.format( ', '.join(sorted(problems['no_suggest'])) ) ) if problems.get('suggest'): for pkgname, suggestions in \ six.iteritems(problems['suggest']): comments.append( 'Package \'{0}\' not found (possible matches: ' '{1})'.format(pkgname, ', '.join(suggestions)) ) if comments: if len(comments) > 1: comments.append('') return {'name': name, 'changes': {}, 'result': False, 'comment': '. '.join(comments).rstrip()} # Resolve the latest package version for any packages with "latest" in the # package version wants_latest = [] \ if sources \ else [x for x, y in six.iteritems(desired) if y == 'latest'] if wants_latest: resolved_latest = __salt__['pkg.latest_version'](*wants_latest, refresh=refresh, **kwargs) if len(wants_latest) == 1: resolved_latest = {wants_latest[0]: resolved_latest} if refresh: was_refreshed = True refresh = False # pkg.latest_version returns an empty string when the package is # up-to-date. 
So check the currently-installed packages. If found, the # resolved latest version will be the currently installed one from # cur_pkgs. If not found, then the package doesn't exist and the # resolved latest version will be None. for key in resolved_latest: if not resolved_latest[key]: if key in cur_pkgs: resolved_latest[key] = cur_pkgs[key][-1] else: resolved_latest[key] = None # Update the desired versions with the ones we resolved desired.update(resolved_latest) # Find out which packages will be targeted in the call to pkg.install targets = {} to_reinstall = {} problems = [] warnings = [] failed_verify = False for package_name, version_string in six.iteritems(desired): cver = cur_pkgs.get(package_name, []) if resolve_capabilities and not cver and package_name in cur_prov: cver = cur_pkgs.get(cur_prov.get(package_name)[0], []) # Package not yet installed, so add to targets if not cver: targets[package_name] = version_string continue if sources: if reinstall: to_reinstall[package_name] = version_string continue elif 'lowpkg.bin_pkg_info' not in __salt__: continue # Metadata parser is available, cache the file and derive the # package's name and version err = 'Unable to cache {0}: {1}' try: cached_path = __salt__['cp.cache_file'](version_string, saltenv=kwargs['saltenv']) except CommandExecutionError as exc: problems.append(err.format(version_string, exc)) continue if not cached_path: problems.append(err.format(version_string, 'file not found')) continue elif not os.path.exists(cached_path): problems.append('{0} does not exist on minion'.format(version_string)) continue source_info = __salt__['lowpkg.bin_pkg_info'](cached_path) if source_info is None: warnings.append('Failed to parse metadata for {0}'.format(version_string)) continue else: verstr = source_info['version'] else: verstr = version_string if reinstall: to_reinstall[package_name] = version_string continue if not __salt__['pkg_resource.check_extra_requirements'](package_name, version_string): 
targets[package_name] = version_string continue # No version specified and pkg is installed elif __salt__['pkg_resource.version_clean'](version_string) is None: if (not reinstall) and pkg_verify: try: verify_result = __salt__['pkg.verify']( package_name, ignore_types=ignore_types, verify_options=verify_options, **kwargs ) except (CommandExecutionError, SaltInvocationError) as exc: failed_verify = exc.strerror continue if verify_result: to_reinstall[package_name] = version_string altered_files[package_name] = verify_result continue version_fulfilled = False allow_updates = bool(not sources and kwargs.get('allow_updates')) try: version_fulfilled = _fulfills_version_string(cver, verstr, ignore_epoch=ignore_epoch, allow_updates=allow_updates) except CommandExecutionError as exc: problems.append(exc.strerror) continue # Compare desired version against installed version. version_spec = True if not version_fulfilled: if reinstall: to_reinstall[package_name] = version_string else: version_conditions = _parse_version_string(version_string) if pkg_verify and any(oper == '==' for oper, version in version_conditions): try: verify_result = __salt__['pkg.verify']( package_name, ignore_types=ignore_types, verify_options=verify_options, **kwargs ) except (CommandExecutionError, SaltInvocationError) as exc: failed_verify = exc.strerror continue if verify_result: to_reinstall[package_name] = version_string altered_files[package_name] = verify_result else: log.debug( 'Current version (%s) did not match desired version ' 'specification (%s), adding to installation targets', cver, version_string ) targets[package_name] = version_string if failed_verify: problems.append(failed_verify) if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not any((targets, to_unpurge, to_reinstall)): # All specified packages are installed msg = 'All specified packages are already installed{0}' msg = msg.format( ' and are at the desired version' if 
version_spec and not sources else '' ) ret = {'name': name, 'changes': {}, 'result': True, 'comment': msg} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret return (desired, targets, to_unpurge, to_reinstall, altered_files, warnings, was_refreshed) def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None): ''' Determine whether or not the installed packages match what was requested in the SLS file. ''' ok = [] failed = [] if not new_caps: new_caps = dict() for pkgname, pkgver in desired.items(): # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names. # Homebrew for Mac OSX does something similar with tap names # prefixing package names, separated with a slash. has_origin = '/' in pkgname if __grains__['os'] == 'FreeBSD' and has_origin: cver = [k for k, v in six.iteritems(new_pkgs) if v['origin'] == pkgname] elif __grains__['os'] == 'MacOS' and has_origin: cver = new_pkgs.get(pkgname, new_pkgs.get(pkgname.split('/')[-1])) elif __grains__['os'] == 'OpenBSD': cver = new_pkgs.get(pkgname.split('%')[0]) elif __grains__['os_family'] == 'Debian': cver = new_pkgs.get(pkgname.split('=')[0]) else: cver = new_pkgs.get(pkgname) if not cver and pkgname in new_caps: cver = new_pkgs.get(new_caps.get(pkgname)[0]) if not cver: failed.append(pkgname) continue elif pkgver == 'latest': ok.append(pkgname) continue elif not __salt__['pkg_resource.version_clean'](pkgver): ok.append(pkgname) continue elif pkgver.endswith("*") and cver[0].startswith(pkgver[:-1]): ok.append(pkgname) continue if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch): ok.append(pkgname) else: failed.append(pkgname) return ok, failed def _get_desired_pkg(name, desired): ''' Helper function that retrieves and nicely formats the desired pkg (and version if specified) so that helpful information can be printed in the comment for the state. 
''' if not desired[name] or desired[name].startswith(('<', '>', '=')): oper = '' else: oper = '=' return '{0}{1}{2}'.format(name, oper, '' if not desired[name] else desired[name]) def _preflight_check(desired, fromrepo, **kwargs): ''' Perform platform-specific checks on desired packages ''' if 'pkg.check_db' not in __salt__: return {} ret = {'suggest': {}, 'no_suggest': []} pkginfo = __salt__['pkg.check_db']( *list(desired.keys()), fromrepo=fromrepo, **kwargs ) for pkgname in pkginfo: if pkginfo[pkgname]['found'] is False: if pkginfo[pkgname]['suggestions']: ret['suggest'][pkgname] = pkginfo[pkgname]['suggestions'] else: ret['no_suggest'].append(pkgname) return ret def _resolve_capabilities(pkgs, refresh=False, **kwargs): ''' Resolve capabilities in ``pkgs`` and exchange them with real package names, when the result is distinct. This feature can be turned on while setting the paramter ``resolve_capabilities`` to True. Return the input dictionary with replaced capability names and as second return value a bool which say if a refresh need to be run. In case of ``resolve_capabilities`` is False (disabled) or not supported by the implementation the input is returned unchanged. ''' if not pkgs or 'pkg.resolve_capabilities' not in __salt__: return pkgs, refresh ret = __salt__['pkg.resolve_capabilities'](pkgs, refresh=refresh, **kwargs) return ret, False def installed( name, version=None, refresh=None, fromrepo=None, skip_verify=False, skip_suggestions=False, pkgs=None, sources=None, allow_updates=False, pkg_verify=False, normalize=True, ignore_epoch=False, reinstall=False, update_holds=False, bypass_file=None, bypass_file_contains=None, **kwargs): ''' Ensure that the package is installed, and that it is the correct version (if specified). :param str name: The name of the package to be installed. This parameter is ignored if either "pkgs" or "sources" is used. Additionally, please note that this option can only be used to install packages from a software repository. 
To install a package file manually, use the "sources" option detailed below. :param str version: Install a specific version of a package. This option is ignored if "sources" is used. Currently, this option is supported for the following pkg providers: :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`pkgin <salt.modules.pkgin>`, :mod:`win_pkg <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`. The version number includes the release designation where applicable, to allow Salt to target a specific release of a given version. When in doubt, using the ``pkg.latest_version`` function for an uninstalled package will tell you the version available. .. code-block:: bash # salt myminion pkg.latest_version vim-enhanced myminion: 2:7.4.160-1.el7 .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the ``pkg.latest_version`` output above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. Also, while this function is not yet implemented for all pkg frontends, :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` will show all versions available in the various repositories for a given package, irrespective of whether or not it is installed. .. 
code-block:: bash # salt myminion pkg.list_repo_pkgs bash myminion: ---------- bash: - 4.2.46-21.el7_3 - 4.2.46-20.el7_2 This function was first added for :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` in 2014.1.0, and was expanded to :py:func:`Debian/Ubuntu <salt.modules.aptpkg.list_repo_pkgs>` and :py:func:`Arch Linux <salt.modules.pacman.list_repo_pkgs>`-based distros in the 2017.7.0 release. The version strings returned by either of these functions can be used as version specifiers in pkg states. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.installed: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 If the version given is the string ``latest``, the latest available package version will be installed à la ``pkg.latest``. **WILDCARD VERSIONS** As of the 2017.7.0 release, this state now supports wildcards in package versions for SUSE SLES/Leap/Tumbleweed, Debian/Ubuntu, RHEL/CentOS, Arch Linux, and their derivatives. Using wildcards can be useful for packages where the release name is built into the version in some way, such as for RHEL/CentOS which typically has version numbers like ``1.2.34-5.el7``. An example of the usage for this would be: .. code-block:: yaml mypkg: pkg.installed: - version: '1.2.34*' Keep in mind that using wildcard versions will result in a slower state run since Salt must gather the available versions of the specified packages and figure out which of them match the specified wildcard expression. :param bool refresh: This parameter controls whether or not the package repo database is updated prior to installing the requested package(s). If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before installing. If ``False``, the package database will *not* be refreshed before installing. 
If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - cache_valid_time: 300 - allow_updates: True - hold: False In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param str fromrepo: Specify a repository from which to install .. note:: Distros which use APT (Debian, Ubuntu, etc.) do not have a concept of repositories, in the same way as YUM-based distros do. When a source is added, it is assigned to a given release. Consider the following source configuration: .. code-block:: text deb http://ppa.launchpad.net/saltstack/salt/ubuntu precise main The packages provided by this source would be made available via the ``precise`` release, therefore ``fromrepo`` would need to be set to ``precise`` for Salt to install the package from this source. Having multiple sources in the same release may result in the default install candidate being newer than what is desired. If this is the case, the desired version must be specified using the ``version`` parameter. 
If the ``pkgs`` parameter is being used to install multiple packages in the same state, then instead of using ``version``, use the method of version specification described in the **Multiple Package Installation Options** section below. Running the shell command ``apt-cache policy pkgname`` on a minion can help elucidate the APT configuration and aid in properly configuring states: .. code-block:: bash root@saltmaster:~# salt ubuntu01 cmd.run 'apt-cache policy ffmpeg' ubuntu01: ffmpeg: Installed: (none) Candidate: 7:0.10.11-1~precise1 Version table: 7:0.10.11-1~precise1 0 500 http://ppa.launchpad.net/jon-severinsson/ffmpeg/ubuntu/ precise/main amd64 Packages 4:0.8.10-0ubuntu0.12.04.1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise-updates/main amd64 Packages 500 http://security.ubuntu.com/ubuntu/ precise-security/main amd64 Packages 4:0.8.1-0ubuntu1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise/main amd64 Packages The release is located directly after the source's URL. The actual release name is the part before the slash, so to install version **4:0.8.10-0ubuntu0.12.04.1** either ``precise-updates`` or ``precise-security`` could be used for the ``fromrepo`` value. :param bool skip_verify: Skip the GPG verification check for the package to be installed :param bool skip_suggestions: Force strict package naming. Disables lookup of package alternatives. .. versionadded:: 2014.1.1 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 :param bool allow_updates: Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. .. versionadded:: 2014.7.0 Example: .. 
code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - allow_updates: True - hold: False :param bool pkg_verify: .. versionadded:: 2014.7.0 For requested packages that are already installed and would not be targeted for upgrade or downgrade, use pkg.verify to determine if any of the files installed by the package have been altered. If files have been altered, the reinstall option of pkg.install is used to force a reinstall. Types to ignore can be passed to pkg.verify. Additionally, ``verify_options`` can be used to modify further the behavior of pkg.verify. See examples below. Currently, this option is supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>`. Examples: .. code-block:: yaml httpd: pkg.installed: - version: 2.2.15-30.el6.centos - pkg_verify: True .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc - verify_options: - nodeps - nofiledigest :param list ignore_types: List of types to ignore when verifying the package .. versionadded:: 2014.7.0 :param list verify_options: List of additional options to pass when verifying the package. These options will be added to the ``rpm -V`` command, prepended with ``--`` (for example, when ``nodeps`` is passed in this option, ``rpm -V`` will be run with ``--nodeps``). .. versionadded:: 2016.11.0 :param bool normalize: Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2014.7.0 Example: .. 
code-block:: yaml gpfs.gplbin-2.6.32-279.31.1.el6.x86_64: pkg.installed: - normalize: False :param bool ignore_epoch: When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.installed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the package would be installed, but the state would report as failed because the actual installed version would be ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 | **MULTIPLE PACKAGE INSTALLATION OPTIONS: (not supported in pkgng)** :param list pkgs: A list of packages to install from a software repository. All packages listed under ``pkgs`` will be installed via a single command. .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar - baz - hold: True ``NOTE:`` For :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`winrepo <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`, version numbers can be specified in the ``pkgs`` argument. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz Additionally, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`zypper <salt.modules.zypper>`, :mod:`yum/dnf <salt.modules.yumpkg>`, and :mod:`apt <salt.modules.aptpkg>` support the ``<``, ``<=``, ``>=``, and ``>`` operators for more control over what versions will be installed. For example: .. 
code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: '>=1.2.3-4' - baz ``NOTE:`` When using comparison operators, the expression must be enclosed in quotes to avoid a YAML render error. With :mod:`ebuild <salt.modules.ebuild>` is also possible to specify a use flag list and/or if the given packages should be in package.accept_keywords file and/or the overlay from which you want the package to be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo: '~' - bar: '~>=1.2:slot::overlay[use,-otheruse]' - baz :param list sources: A list of packages to install, along with the source URI or local path from which to install each package. In the example below, ``foo``, ``bar``, ``baz``, etc. refer to the name of the package, as it would appear in the output of the ``pkg.version`` or ``pkg.list_pkgs`` salt CLI commands. .. code-block:: yaml mypkgs: pkg.installed: - sources: - foo: salt://rpms/foo.rpm - bar: http://somesite.org/bar.rpm - baz: ftp://someothersite.org/baz.rpm - qux: /minion/path/to/qux.rpm **PLATFORM-SPECIFIC ARGUMENTS** These are specific to each OS. If it does not apply to the execution module for your OS, it is ignored. :param bool hold: Force the package to be held at the current installed version. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2014.7.0 :param bool update_holds: If ``True``, and this function would update the package version, any packages which are being held will be temporarily unheld so that they can be updated. Otherwise, if this function attempts to update a held package, the held package(s) will be skipped and the state will fail. By default, this parameter is set to ``False``. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2016.11.0 :param list names: A list of packages to install from a software repository. Each package will be installed individually by the package manager. .. warning:: Unlike ``pkgs``, the ``names`` parameter cannot specify a version. 
In addition, it makes a separate call to the package management frontend to install each package, whereas ``pkgs`` makes just a single call. It is therefore recommended to use ``pkgs`` instead of ``names`` to install multiple packages, both for the additional features and the performance improvement that it brings. :param bool install_recommends: Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - install_recommends: False :param bool only_upgrade: Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. :param bool report_reboot_exit_codes: If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. code-block:: yaml ms vcpp installed: pkg.installed: - name: ms-vcpp - version: 10.0.40219 - report_reboot_exit_codes: False :param str bypass_file: If you wish to bypass the full package validation process, you can specify a file related to the installed package as a way to validate the package has already been installed. A good example would be a config file that is deployed with the package. Another bypass_file could be ``/run/salt-minion.pid``. ..
code-block:: yaml install_ntp: pkg.installed: - name: ntp - bypass_file: /etc/ntp.conf The use case for this feature is when running salt at significant scale. Each state that has a requisite for a ``pkg.installed`` will have salt querying the package manager of the system. Compared to simple diff checks, querying the package manager is a lengthy process. This feature is an attempt to reduce the run time of states. If only a config change is being made but you wish to keep all of the self resolving requisites this bypasses the lengthy cost of the package manager. The assumption is that if this file is present, the package should already be installed. :param str bypass_file_contains: This option can only be used in conjunction with the ``bypass_file`` option. It is to provide a second layer of validation before bypassing the ``pkg.installed`` process. .. code-block:: yaml install_ntp: pkg.installed: - name: ntp - bypass_file: /etc/ntp.conf - bypass_file_contains: version-20181218 This will have salt check to see if the file contains the specified string. If the value is found, the ``pkg.installed`` process will be bypassed under the assumption that two pieces of validation have passed and the package is already installed. .. warning:: Do not try and use ``{{ salt['pkg.version']('ntp') }}`` in a jinja template as part of your bypass_file_contains match. This will trigger a ``pkg.version`` lookup with the package manager and negate any time saved by trying to use the bypass feature. :return: A dictionary containing the state of the software installation :rtype dict: .. note:: The ``pkg.installed`` state supports the usage of ``reload_modules``. This functionality allows you to force Salt to reload all modules. In many cases, Salt is clever enough to transparently reload the modules. For example, if you install a package, Salt reloads modules because some other module or state might require the package which was installed.
However, there are some edge cases where this may not be the case, which is what ``reload_modules`` is meant to resolve. You should only use ``reload_modules`` if your ``pkg.installed`` does some sort of installation where if you do not reload the modules future items in your state which rely on the software being installed will fail. Please see the :ref:`Reloading Modules <reloading-modules>` documentation for more information. ''' if not pkgs and isinstance(pkgs, list): return {'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided'} # If just a name (and optionally a version) is passed, just pack them into # the pkgs argument. if name and not any((pkgs, sources)): if version: pkgs = [{name: version}] version = None else: pkgs = [name] kwargs['saltenv'] = __env__ refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if bypass_file is not None and bypass_file_contains is not None: if os.path.isfile(bypass_file): with salt.utils.fopen(bypass_file) as bypass_file_open: if bypass_file_contains in bypass_file_open.read(): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as {} was present in {}'.format(bypass_file_contains, bypass_file)} if bypass_file is not None and bypass_file_contains is None: if os.path.isfile(bypass_file): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as bypass_file {} was present'.format(bypass_file)} # check if capabilities should be checked and modify the requested packages # accordingly. 
if pkgs: pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) if not isinstance(pkg_verify, list): pkg_verify = pkg_verify is True if (pkg_verify or isinstance(pkg_verify, list)) \ and 'pkg.verify' not in __salt__: return {'name': name, 'changes': {}, 'result': False, 'comment': 'pkg.verify not implemented'} if not isinstance(version, six.string_types) and version is not None: version = six.text_type(version) kwargs['allow_updates'] = allow_updates result = _find_install_targets(name, version, pkgs, sources, fromrepo=fromrepo, skip_suggestions=skip_suggestions, pkg_verify=pkg_verify, normalize=normalize, ignore_epoch=ignore_epoch, reinstall=reinstall, refresh=refresh, **kwargs) try: (desired, targets, to_unpurge, to_reinstall, altered_files, warnings, was_refreshed) = result if was_refreshed: refresh = False except ValueError: # _find_install_targets() found no targets or encountered an error # check that the hold function is available if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( name=name, pkgs=pkgs, sources=sources ) except (CommandExecutionError, SaltInvocationError) as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': six.text_type(exc)} if 'result' in hold_ret and not hold_ret['result']: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] for i in modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] result['changes'][i['name']] = i['changes'] for i in not_modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) 
result['result'] = i['result'] for i in failed_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] return result if to_unpurge and 'lowpkg.unpurge' not in __salt__: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'lowpkg.unpurge not implemented'} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret # Remove any targets not returned by _find_install_targets if pkgs: pkgs = [dict([(x, y)]) for x, y in six.iteritems(targets)] pkgs.extend([dict([(x, y)]) for x, y in six.iteritems(to_reinstall)]) elif sources: oldsources = sources sources = [x for x in oldsources if next(iter(list(x.keys()))) in targets] sources.extend([x for x in oldsources if next(iter(list(x.keys()))) in to_reinstall]) comment = [] if __opts__['test']: if targets: if sources: summary = ', '.join(targets) else: summary = ', '.join([_get_desired_pkg(x, targets) for x in targets]) comment.append('The following packages would be ' 'installed/updated: {0}'.format(summary)) if to_unpurge: comment.append( 'The following packages would have their selection status ' 'changed from \'purge\' to \'install\': {0}' .format(', '.join(to_unpurge)) ) if to_reinstall: # Add a comment for each package in to_reinstall with its # pkg.verify output if reinstall: reinstall_targets = [] for reinstall_pkg in to_reinstall: if sources: reinstall_targets.append(reinstall_pkg) else: reinstall_targets.append( _get_desired_pkg(reinstall_pkg, to_reinstall) ) msg = 'The following packages would be reinstalled: ' msg += ', '.join(reinstall_targets) comment.append(msg) else: for reinstall_pkg in to_reinstall: if sources: pkgstr = reinstall_pkg else: pkgstr = _get_desired_pkg(reinstall_pkg, to_reinstall) comment.append( 'Package \'{0}\' would be reinstalled because the ' 'following files have been altered:'.format(pkgstr) ) comment.append( _nested_output(altered_files[reinstall_pkg]) ) ret = {'name': name, 'changes': {}, 'result': None, 'comment': 
'\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret changes = {'installed': {}} modified_hold = None not_modified_hold = None failed_hold = None if targets or to_reinstall: try: pkg_ret = __salt__['pkg.install'](name=None, refresh=refresh, version=version, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=pkgs, sources=sources, reinstall=bool(to_reinstall), normalize=normalize, update_holds=update_holds, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while installing ' 'package(s): {0}'.format(exc)) if warnings: ret.setdefault('warnings', []).extend(warnings) return ret if refresh: refresh = False if isinstance(pkg_ret, dict): changes['installed'].update(pkg_ret) elif isinstance(pkg_ret, six.string_types): comment.append(pkg_ret) # Code below will be looking for a dictionary. If this is a string # it means that there was an exception raised and that no packages # changed, so now that we have added this error to the comments we # set this to an empty dictionary so that the code below which # checks reinstall targets works. 
pkg_ret = {} if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( name=name, pkgs=desired ) except (CommandExecutionError, SaltInvocationError) as exc: comment.append(six.text_type(exc)) ret = {'name': name, 'changes': changes, 'result': False, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: if 'result' in hold_ret and not hold_ret['result']: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] if to_unpurge: changes['purge_desired'] = __salt__['lowpkg.unpurge'](*to_unpurge) # Analyze pkg.install results for packages in targets if sources: modified = [x for x in changes['installed'] if x in targets] not_modified = [x for x in desired if x not in targets and x not in to_reinstall] failed = [x for x in targets if x not in modified] else: if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True new_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__: new_caps = __salt__['pkg.list_provides'](**kwargs) else: new_caps = {} ok, failed = _verify_install(desired, new_pkgs, ignore_epoch=ignore_epoch, new_caps=new_caps) modified = [x for x in ok if x in targets] not_modified = [x for x in ok if x not in targets and x not in to_reinstall] failed = [x for x in failed if x in targets] # If there was nothing unpurged, just set the changes dict to the contents # of changes['installed']. 
if not changes.get('purge_desired'): changes = changes['installed'] if modified: if sources: summary = ', '.join(modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in modified]) if len(summary) < 20: comment.append('The following packages were installed/updated: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} installed/updated.'.format( len(modified), 's' if len(modified) > 1 else '', 'were' if len(modified) > 1 else 'was' ) ) if modified_hold: for i in modified_hold: change_name = i['name'] if change_name in changes: comment.append(i['comment']) if changes[change_name]['new']: changes[change_name]['new'] += '\n' changes[change_name]['new'] += '{0}'.format(i['changes']['new']) if changes[change_name]['old']: changes[change_name]['old'] += '\n' changes[change_name]['old'] += '{0}'.format(i['changes']['old']) else: comment.append(i['comment']) changes[change_name] = {} changes[change_name]['new'] = '{0}'.format(i['changes']['new']) # Any requested packages that were not targeted for install or reinstall if not_modified: if sources: summary = ', '.join(not_modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in not_modified]) if len(not_modified) <= 20: comment.append('The following packages were already installed: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} already installed'.format( len(not_modified), 's' if len(not_modified) > 1 else '', 'were' if len(not_modified) > 1 else 'was' ) ) if not_modified_hold: for i in not_modified_hold: comment.append(i['comment']) result = True if failed: if sources: summary = ', '.join(failed) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in failed]) comment.insert(0, 'The following packages failed to ' 'install/update: {0}'.format(summary)) result = False if failed_hold: for i in failed_hold: comment.append(i['comment']) result = False # Get the ignore_types list if any from the pkg_verify argument if 
isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] # Rerun pkg.verify for packages in to_reinstall to determine failed modified = [] failed = [] for reinstall_pkg in to_reinstall: if reinstall: if reinstall_pkg in pkg_ret: modified.append(reinstall_pkg) else: failed.append(reinstall_pkg) elif pkg_verify: # No need to wrap this in a try/except because we would already # have caught invalid arguments earlier. verify_result = __salt__['pkg.verify'](reinstall_pkg, ignore_types=ignore_types, verify_options=verify_options, **kwargs) if verify_result: failed.append(reinstall_pkg) altered_files[reinstall_pkg] = verify_result else: modified.append(reinstall_pkg) if modified: # Add a comment for each package in modified with its pkg.verify output for modified_pkg in modified: if sources: pkgstr = modified_pkg else: pkgstr = _get_desired_pkg(modified_pkg, desired) msg = 'Package {0} was reinstalled.'.format(pkgstr) if modified_pkg in altered_files: msg += ' The following files were remediated:' comment.append(msg) comment.append(_nested_output(altered_files[modified_pkg])) else: comment.append(msg) if failed: # Add a comment for each package in failed with its pkg.verify output for failed_pkg in failed: if sources: pkgstr = failed_pkg else: pkgstr = _get_desired_pkg(failed_pkg, desired) msg = ('Reinstall was not successful for package {0}.' 
.format(pkgstr)) if failed_pkg in altered_files: msg += ' The following files could not be remediated:' comment.append(msg) comment.append(_nested_output(altered_files[failed_pkg])) else: comment.append(msg) result = False ret = {'name': name, 'changes': changes, 'result': result, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret def downloaded(name, version=None, pkgs=None, fromrepo=None, ignore_epoch=None, **kwargs): ''' .. versionadded:: 2017.7.0 Ensure that the package is downloaded, and that it is the correct version (if specified). Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` :param str name: The name of the package to be downloaded. This parameter is ignored if either "pkgs" is used. Additionally, please note that this option can only be used to download packages from a software repository. :param str version: Download a specific version of a package. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.downloaded: - version: 2:7.4.160-1.el7 An **ignore_epoch** argument has been added to which causes the epoch to be disregarded when the state checks to see if the desired version was installed. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.downloaded: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 CLI Example: .. 
code-block:: yaml zsh: pkg.downloaded: - version: 5.0.5-4.63 - fromrepo: "myrepository" ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} if 'pkg.list_downloaded' not in __salt__: ret['result'] = False ret['comment'] = 'The pkg.downloaded state is not available on ' \ 'this platform' return ret if not pkgs and isinstance(pkgs, list): ret['result'] = True ret['comment'] = 'No packages to download provided' return ret # If just a name (and optionally a version) is passed, just pack them into # the pkgs argument. if name and not pkgs: if version: pkgs = [{name: version}] version = None else: pkgs = [name] # It doesn't make sense here to received 'downloadonly' as kwargs # as we're explicitly passing 'downloadonly=True' to execution module. if 'downloadonly' in kwargs: del kwargs['downloadonly'] pkgs, _refresh = _resolve_capabilities(pkgs, **kwargs) # Only downloading not yet downloaded packages targets = _find_download_targets(name, version, pkgs, fromrepo=fromrepo, ignore_epoch=ignore_epoch, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, dict): ret['result'] = False ret['comment'] = 'An error was encountered while checking targets: ' \ '{0}'.format(targets) return ret if __opts__['test']: summary = ', '.join(targets) ret['comment'] = 'The following packages would be ' \ 'downloaded: {0}'.format(summary) return ret try: pkg_ret = __salt__['pkg.install'](name=name, pkgs=pkgs, version=version, downloadonly=True, fromrepo=fromrepo, ignore_epoch=ignore_epoch, **kwargs) ret['result'] = True ret['changes'].update(pkg_ret) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = 'An error was encountered while downloading ' \ 'package(s): {0}'.format(exc) return ret new_pkgs = __salt__['pkg.list_downloaded'](**kwargs) ok, failed = _verify_install(targets, new_pkgs, ignore_epoch=ignore_epoch) if failed: summary = ', '.join([_get_desired_pkg(x, targets) for x in failed]) ret['result'] = False ret['comment'] = 'The following packages failed to ' \ 'download: {0}'.format(summary) if not ret['changes'] and not ret['comment']: ret['result'] = True ret['comment'] = 'Packages are already downloaded: ' \ '{0}'.format(', '.join(targets)) return ret def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs): ''' .. versionadded:: 2017.7.0 Ensure that packages related to certain advisory ids are installed. Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` CLI Example: .. 
code-block:: yaml issue-foo-fixed: pkg.patch_installed: - advisory_ids: - SUSE-SLE-SERVER-12-SP2-2017-185 - SUSE-SLE-SERVER-12-SP2-2017-150 - SUSE-SLE-SERVER-12-SP2-2017-120 ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} if 'pkg.list_patches' not in __salt__: ret['result'] = False ret['comment'] = 'The pkg.patch_installed state is not available on ' \ 'this platform' return ret if not advisory_ids and isinstance(advisory_ids, list): ret['result'] = True ret['comment'] = 'No advisory ids provided' return ret # Only downloading not yet downloaded packages targets = _find_advisory_targets(name, advisory_ids, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, list): ret['result'] = False ret['comment'] = 'An error was encountered while checking targets: ' \ '{0}'.format(targets) return ret if __opts__['test']: summary = ', '.join(targets) ret['comment'] = 'The following advisory patches would be ' \ 'downloaded: {0}'.format(summary) return ret try: pkg_ret = __salt__['pkg.install'](name=name, advisory_ids=advisory_ids, downloadonly=downloadonly, **kwargs) ret['result'] = True ret['changes'].update(pkg_ret) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while downloading ' 'package(s): {0}'.format(exc)) return ret if not ret['changes'] and not ret['comment']: status = 'downloaded' if downloadonly else 'installed' ret['result'] = True ret['comment'] = ('Advisory patch is not needed or related packages ' 'are already {0}'.format(status)) return ret def patch_downloaded(name, advisory_ids=None, **kwargs): ''' .. versionadded:: 2017.7.0 Ensure that packages related to certain advisory ids are downloaded. 
Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` CLI Example: .. code-block:: yaml preparing-to-fix-issues: pkg.patch_downloaded: - advisory_ids: - SUSE-SLE-SERVER-12-SP2-2017-185 - SUSE-SLE-SERVER-12-SP2-2017-150 - SUSE-SLE-SERVER-12-SP2-2017-120 ''' if 'pkg.list_patches' not in __salt__: return {'name': name, 'result': False, 'changes': {}, 'comment': 'The pkg.patch_downloaded state is not available on ' 'this platform'} # It doesn't make sense here to received 'downloadonly' as kwargs # as we're explicitly passing 'downloadonly=True' to execution module. if 'downloadonly' in kwargs: del kwargs['downloadonly'] return patch_installed(name=name, advisory_ids=advisory_ids, downloadonly=True, **kwargs) def latest( name, refresh=None, fromrepo=None, skip_verify=False, pkgs=None, watch_flags=True, **kwargs): ''' Ensure that the named package is installed and the latest available package. If the package can be updated, this state function will update the package. Generally it is better for the :mod:`installed <salt.states.pkg.installed>` function to be used, as :mod:`latest <salt.states.pkg.latest>` will update the package whenever a new package is available. name The name of the package to maintain at the latest available version. This parameter is ignored if "pkgs" is used. fromrepo Specify a repository from which to install skip_verify Skip the GPG verification check for the package to be installed refresh This parameter controls whether or not the package repo database is updated prior to checking for the latest available version of the requested packages. If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before checking for the latest available version of the requested packages. If ``False``, the package database will *not* be refreshed before checking. 
If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.latest: - refresh: True - cache_valid_time: 300 In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 Multiple Package Installation Options: (Not yet supported for: FreeBSD, OpenBSD, MacOS, and Solaris pkgutil) pkgs A list of packages to maintain at the latest available version. .. code-block:: yaml mypkgs: pkg.latest: - pkgs: - foo - bar - baz install_recommends Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - install_recommends: False only_upgrade Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. 
report_reboot_exit_codes If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. code-block:: yaml ms vcpp installed: pkg.latest: - name: ms-vcpp - report_reboot_exit_codes: False ''' refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if kwargs.get('sources'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'The "sources" parameter is not supported.'} elif pkgs: desired_pkgs = list(_repack_pkgs(pkgs).keys()) if not desired_pkgs: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted "pkgs" parameter. See ' 'minion log.'} else: if not pkgs and isinstance(pkgs, list): return { 'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided' } else: desired_pkgs = [name] kwargs['saltenv'] = __env__ # check if capabilities should be checked and modify the requested packages # accordingly. 
desired_pkgs, refresh = _resolve_capabilities(desired_pkgs, refresh=refresh, **kwargs) try: avail = __salt__['pkg.latest_version'](*desired_pkgs, fromrepo=fromrepo, refresh=refresh, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking the ' 'newest available version of package(s): {0}' .format(exc)} try: cur = __salt__['pkg.version'](*desired_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': exc.strerror} # Repack the cur/avail data if only a single package is being checked if isinstance(cur, six.string_types): cur = {desired_pkgs[0]: cur} if isinstance(avail, six.string_types): avail = {desired_pkgs[0]: avail} targets = {} problems = [] for pkg in desired_pkgs: if not avail.get(pkg): # Package either a) is up-to-date, or b) does not exist if not cur.get(pkg): # Package does not exist msg = 'No information found for \'{0}\'.'.format(pkg) log.error(msg) problems.append(msg) elif watch_flags \ and __grains__.get('os_family') == 'Gentoo' \ and __salt__['portage_config.is_changed_uses'](pkg): # Package is up-to-date, but Gentoo USE flags are changing so # we need to add it to the targets targets[pkg] = cur[pkg] else: # Package either a) is not installed, or b) is installed and has an # upgrade available targets[pkg] = avail[pkg] if problems: return { 'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems) } if targets: # Find up-to-date packages if not pkgs: # There couldn't have been any up-to-date packages if this state # only targeted a single package and is being allowed to proceed to # the install step. 
up_to_date = [] else: up_to_date = [x for x in pkgs if x not in targets] if __opts__['test']: comments = [] comments.append( 'The following packages would be installed/upgraded: ' + ', '.join(sorted(targets)) ) if up_to_date: up_to_date_count = len(up_to_date) if up_to_date_count <= 10: comments.append( 'The following packages are already up-to-date: ' + ', '.join( ['{0} ({1})'.format(x, cur[x]) for x in sorted(up_to_date)] ) ) else: comments.append( '{0} packages are already up-to-date' .format(up_to_date_count) ) return {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comments)} if salt.utils.platform.is_windows(): # pkg.install execution module on windows ensures the software # package is installed when no version is specified, it does not # upgrade the software to the latest. This is per the design. # Build updated list of pkgs *with verion number*, exclude # non-targeted ones targeted_pkgs = [{x: targets[x]} for x in targets] else: # Build updated list of pkgs to exclude non-targeted ones targeted_pkgs = list(targets) # No need to refresh, if a refresh was necessary it would have been # performed above when pkg.latest_version was run. 
try: changes = __salt__['pkg.install'](name=None, refresh=False, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=targeted_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while installing ' 'package(s): {0}'.format(exc)} if changes: # Find failed and successful updates failed = [x for x in targets if not changes.get(x) or changes[x].get('new') != targets[x] and targets[x] != 'latest'] successful = [x for x in targets if x not in failed] comments = [] if failed: msg = 'The following packages failed to update: ' \ '{0}'.format(', '.join(sorted(failed))) comments.append(msg) if successful: msg = 'The following packages were successfully ' \ 'installed/upgraded: ' \ '{0}'.format(', '.join(sorted(successful))) comments.append(msg) if up_to_date: if len(up_to_date) <= 10: msg = 'The following packages were already up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: msg = '{0} packages were already up-to-date '.format( len(up_to_date)) comments.append(msg) return {'name': name, 'changes': changes, 'result': False if failed else True, 'comment': ' '.join(comments)} else: if len(targets) > 10: comment = ('{0} targeted packages failed to update. ' 'See debug log for details.'.format(len(targets))) elif len(targets) > 1: comment = ('The following targeted packages failed to update. ' 'See debug log for details: ({0}).' 
.format(', '.join(sorted(targets)))) else: comment = 'Package {0} failed to ' \ 'update.'.format(next(iter(list(targets.keys())))) if up_to_date: if len(up_to_date) <= 10: comment += ' The following packages were already ' \ 'up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: comment += '{0} packages were already ' \ 'up-to-date'.format(len(up_to_date)) return {'name': name, 'changes': changes, 'result': False, 'comment': comment} else: if len(desired_pkgs) > 10: comment = 'All {0} packages are up-to-date.'.format( len(desired_pkgs)) elif len(desired_pkgs) > 1: comment = 'All packages are up-to-date ' \ '({0}).'.format(', '.join(sorted(desired_pkgs))) else: comment = 'Package {0} is already ' \ 'up-to-date'.format(desired_pkgs[0]) return {'name': name, 'changes': {}, 'result': True, 'comment': comment} def _uninstall( action='remove', name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Common function for package removal ''' if action not in ('remove', 'purge'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalid action \'{0}\'. 
' 'This is probably a bug.'.format(action)} try: pkg_params = __salt__['pkg_resource.parse_targets']( name, pkgs, normalize=normalize)[0] except MinionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while parsing targets: ' '{0}'.format(exc)} targets = _find_remove_targets(name, version, pkgs, normalize, ignore_epoch=ignore_epoch, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, list): return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking targets: ' '{0}'.format(targets)} if action == 'purge': old_removed = __salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) targets.extend([x for x in pkg_params if x in old_removed]) targets.sort() if not targets: return {'name': name, 'changes': {}, 'result': True, 'comment': 'None of the targeted packages are installed' '{0}'.format(' or partially installed' if action == 'purge' else '')} if __opts__['test']: return {'name': name, 'changes': {}, 'result': None, 'comment': 'The following packages will be {0}d: ' '{1}.'.format(action, ', '.join(targets))} changes = __salt__['pkg.{0}'.format(action)](name, pkgs=pkgs, version=version, **kwargs) new = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) failed = [] for x in pkg_params: if __grains__['os_family'] in ['Suse', 'RedHat']: # Check if the package version set to be removed is actually removed: if x in new and not pkg_params[x]: failed.append(x) elif x in new and pkg_params[x] in new[x]: failed.append(x + "-" + pkg_params[x]) elif x in new: failed.append(x) if action == 'purge': new_removed = __salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) failed.extend([x for x in pkg_params if x in new_removed]) failed.sort() if failed: return {'name': name, 'changes': changes, 'result': False, 'comment': 'The following packages failed to {0}: ' '{1}.'.format(action, ', 
'.join(failed))} comments = [] not_installed = sorted([x for x in pkg_params if x not in targets]) if not_installed: comments.append('The following packages were not installed: ' '{0}'.format(', '.join(not_installed))) comments.append('The following packages were {0}d: ' '{1}.'.format(action, ', '.join(targets))) else: comments.append('All targeted packages were {0}d.'.format(action)) return {'name': name, 'changes': changes, 'result': True, 'comment': ' '.join(comments)} def removed(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.remove`` if necessary to remove the package. name The name of the package to be removed. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.removed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to remove. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='remove', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while removing ' 'package(s): {0}'.format(exc)) return ret def purged(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.purge`` if necessary to purge the package. 
All configuration files are also removed. name The name of the package to be purged. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. 
code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.purged: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to purge. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='purge', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while purging ' 'package(s): {0}'.format(exc)) return ret def uptodate(name, refresh=False, pkgs=None, **kwargs): ''' .. versionadded:: 2014.7.0 .. versionchanged:: 2018.3.0 Added support for the ``pkgin`` provider. Verify that the system is completely up to date. name The name has no functional value and is only used as a tracking reference refresh refresh the package database before checking for new upgrades pkgs list of packages to upgrade :param str cache_valid_time: This parameter sets the value in seconds after which cache marked as invalid, and cache update is necessary. This overwrite ``refresh`` parameter default behavior. In this case cache_valid_time is set, refresh will not take place for amount in seconds since last ``apt-get update`` executed on the system. .. 
note:: This parameter available only on Debian based distributions, and have no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 kwargs Any keyword arguments to pass through to ``pkg.upgrade``. .. versionadded:: 2015.5.0 ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'Failed to update'} if 'pkg.list_upgrades' not in __salt__: ret['comment'] = 'State pkg.uptodate is not available' return ret # emerge --update doesn't appear to support repo notation if 'fromrepo' in kwargs and __grains__['os_family'] == 'Gentoo': ret['comment'] = '\'fromrepo\' argument not supported on this platform' return ret if isinstance(refresh, bool): pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) try: packages = __salt__['pkg.list_upgrades'](refresh=refresh, **kwargs) expected = {pkgname: {'new': pkgver, 'old': __salt__['pkg.version'](pkgname, **kwargs)} for pkgname, pkgver in six.iteritems(packages)} if isinstance(pkgs, list): packages = [pkg for pkg in packages if pkg in pkgs] expected = {pkgname: pkgver for pkgname, pkgver in six.iteritems(expected) if pkgname in pkgs} except Exception as exc: ret['comment'] = six.text_type(exc) return ret else: ret['comment'] = 'refresh must be either True or False' return ret if not packages: ret['comment'] = 'System is already up-to-date' ret['result'] = True return ret elif __opts__['test']: ret['comment'] = 'System update will be performed' ret['changes'] = expected ret['result'] = None return ret try: ret['changes'] = __salt__['pkg.upgrade'](refresh=refresh, pkgs=pkgs, **kwargs) except CommandExecutionError as exc: if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while updating ' 'packages: {0}'.format(exc)) return ret # If a package list was provided, ensure those packages were updated missing = [] if isinstance(pkgs, list): missing = [pkg for pkg in six.iterkeys(expected) if pkg not in ret['changes']] if missing: ret['comment'] = 'The following package(s) failed to update: {0}'.format(', '.join(missing)) ret['result'] = False else: ret['comment'] = 'Upgrade ran successfully' ret['result'] = True return ret def group_installed(name, skip=None, include=None, **kwargs): ''' .. versionadded:: 2015.8.0 .. versionchanged:: 2016.11.0 Added support in :mod:`pacman <salt.modules.pacman>` Ensure that an entire package group is installed. This state is currently only supported for the :mod:`yum <salt.modules.yumpkg>` and :mod:`pacman <salt.modules.pacman>` package managers. skip Packages that would normally be installed by the package group ("default" packages), which should not be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - skip: - piranha include Packages which are included in a group, which would not normally be installed by a ``yum groupinstall`` ("optional" packages). Note that this will not enforce group membership; if you include packages which are not members of the specified groups, they will still be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - include: - haproxy .. versionchanged:: 2016.3.0 This option can no longer be passed as a comma-separated list, it must now be passed as a list (as shown in the above example). .. note:: Because this is essentially a wrapper around :py:func:`pkg.install <salt.modules.yumpkg.install>`, any argument which can be passed to pkg.install may also be included here, and it will be passed on to the call to :py:func:`pkg.install <salt.modules.yumpkg.install>`. 
''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} if 'pkg.group_diff' not in __salt__: ret['comment'] = 'pkg.group_install not available for this platform' return ret if skip is None: skip = [] else: if not isinstance(skip, list): ret['comment'] = 'skip must be formatted as a list' return ret for idx, item in enumerate(skip): if not isinstance(item, six.string_types): skip[idx] = six.text_type(item) if include is None: include = [] else: if not isinstance(include, list): ret['comment'] = 'include must be formatted as a list' return ret for idx, item in enumerate(include): if not isinstance(item, six.string_types): include[idx] = six.text_type(item) try: diff = __salt__['pkg.group_diff'](name) except CommandExecutionError as err: ret['comment'] = ('An error was encountered while installing/updating ' 'group \'{0}\': {1}.'.format(name, err)) return ret mandatory = diff['mandatory']['installed'] + \ diff['mandatory']['not installed'] invalid_skip = [x for x in mandatory if x in skip] if invalid_skip: ret['comment'] = ( 'The following mandatory packages cannot be skipped: {0}' .format(', '.join(invalid_skip)) ) return ret targets = diff['mandatory']['not installed'] targets.extend([x for x in diff['default']['not installed'] if x not in skip]) targets.extend(include) if not targets: ret['result'] = True ret['comment'] = 'Group \'{0}\' is already installed'.format(name) return ret partially_installed = diff['mandatory']['installed'] \ or diff['default']['installed'] \ or diff['optional']['installed'] if __opts__['test']: ret['result'] = None if partially_installed: ret['comment'] = ( 'Group \'{0}\' is partially installed and will be updated' .format(name) ) else: ret['comment'] = 'Group \'{0}\' will be installed'.format(name) return ret try: ret['changes'] = __salt__['pkg.install'](pkgs=targets, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while ' 'installing/updating group \'{0}\': {1}' .format(name, exc)) return ret failed = [x for x in targets if x not in __salt__['pkg.list_pkgs'](**kwargs)] if failed: ret['comment'] = ( 'Failed to install the following packages: {0}' .format(', '.join(failed)) ) return ret ret['result'] = True ret['comment'] = 'Group \'{0}\' was {1}'.format( name, 'updated' if partially_installed else 'installed' ) return ret def mod_init(low): ''' Set a flag to tell the install functions to refresh the package database. This ensures that the package database is refreshed only once during a state run significantly improving the speed of package management during a state run. It sets a flag for a number of reasons, primarily due to timeline logic. When originally setting up the mod_init for pkg a number of corner cases arose with different package managers and how they refresh package data. It also runs the "ex_mod_init" from the package manager module that is currently loaded. The "ex_mod_init" is expected to work as a normal "mod_init" function. .. 
seealso:: :py:func:`salt.modules.ebuild.ex_mod_init` ''' ret = True if 'pkg.ex_mod_init' in __salt__: ret = __salt__['pkg.ex_mod_init'](low) if low['fun'] == 'installed' or low['fun'] == 'latest': salt.utils.pkg.write_rtag(__opts__) return ret return False def mod_aggregate(low, chunks, running): ''' The mod_aggregate function which looks up all packages in the available low chunks and merges them into a single pkgs ref in the present low data ''' pkgs = [] pkg_type = None agg_enabled = [ 'installed', 'latest', 'removed', 'purged', ] if low.get('fun') not in agg_enabled: return low for chunk in chunks: tag = __utils__['state.gen_tag'](chunk) if tag in running: # Already ran the pkg state, skip aggregation continue if chunk.get('state') == 'pkg': if '__agg__' in chunk: continue # Check for the same function if chunk.get('fun') != low.get('fun'): continue # Check for the same repo if chunk.get('fromrepo') != low.get('fromrepo'): continue # Check first if 'sources' was passed so we don't aggregate pkgs # and sources together. if 'sources' in chunk: if pkg_type is None: pkg_type = 'sources' if pkg_type == 'sources': pkgs.extend(chunk['sources']) chunk['__agg__'] = True else: if pkg_type is None: pkg_type = 'pkgs' if pkg_type == 'pkgs': # Pull out the pkg names! if 'pkgs' in chunk: pkgs.extend(chunk['pkgs']) chunk['__agg__'] = True elif 'name' in chunk: version = chunk.pop('version', None) if version is not None: pkgs.append({chunk['name']: version}) else: pkgs.append(chunk['name']) chunk['__agg__'] = True if pkg_type is not None and pkgs: if pkg_type in low: low[pkg_type].extend(pkgs) else: low[pkg_type] = pkgs return low def mod_watch(name, **kwargs): ''' Install/reinstall a package based on a watch requisite .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered. 
''' sfun = kwargs.pop('sfun', None) mapfun = {'purged': purged, 'latest': latest, 'removed': removed, 'installed': installed} if sfun in mapfun: return mapfun[sfun](name, **kwargs) return {'name': name, 'changes': {}, 'comment': 'pkg.{0} does not work with the watch requisite'.format(sfun), 'result': False}
saltstack/salt
salt/states/pkg.py
_resolve_capabilities
python
def _resolve_capabilities(pkgs, refresh=False, **kwargs): ''' Resolve capabilities in ``pkgs`` and exchange them with real package names, when the result is distinct. This feature can be turned on while setting the paramter ``resolve_capabilities`` to True. Return the input dictionary with replaced capability names and as second return value a bool which say if a refresh need to be run. In case of ``resolve_capabilities`` is False (disabled) or not supported by the implementation the input is returned unchanged. ''' if not pkgs or 'pkg.resolve_capabilities' not in __salt__: return pkgs, refresh ret = __salt__['pkg.resolve_capabilities'](pkgs, refresh=refresh, **kwargs) return ret, False
Resolve capabilities in ``pkgs`` and exchange them with real package names, when the result is distinct. This feature can be turned on while setting the paramter ``resolve_capabilities`` to True. Return the input dictionary with replaced capability names and as second return value a bool which say if a refresh need to be run. In case of ``resolve_capabilities`` is False (disabled) or not supported by the implementation the input is returned unchanged.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pkg.py#L929-L946
null
# -*- coding: utf-8 -*- ''' Installation of packages using OS package managers such as yum or apt-get ========================================================================= .. note:: On minions running systemd>=205, as of version 2015.8.12, 2016.3.3, and 2016.11.0, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing the package manager commands spawned by Salt, when Salt updates itself (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.use_scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Salt can manage software packages via the pkg state module, packages can be set up to be installed, latest, removed and purged. Package management declarations are typically rather simple: .. code-block:: yaml vim: pkg.installed A more involved example involves pulling from a custom repository. .. code-block:: yaml base: pkgrepo.managed: - humanname: Logstash PPA - name: ppa:wolfnet/logstash - dist: precise - file: /etc/apt/sources.list.d/logstash.list - keyid: 28B04E4A - keyserver: keyserver.ubuntu.com logstash: pkg.installed: - fromrepo: ppa:wolfnet/logstash Multiple packages can also be installed with the use of the pkgs state module .. code-block:: yaml dotdeb.repo: pkgrepo.managed: - humanname: Dotdeb - name: deb http://packages.dotdeb.org wheezy-php55 all - dist: wheezy-php55 - file: /etc/apt/sources.list.d/dotbeb.list - keyid: 89DF5277 - keyserver: keys.gnupg.net - refresh_db: true php.packages: pkg.installed: - fromrepo: wheezy-php55 - pkgs: - php5-fpm - php5-cli - php5-curl .. 
warning:: Package names are currently case-sensitive. If the minion is using a package manager which is not case-sensitive (such as :mod:`pkgng <salt.modules.pkgng>`), then this state will fail if the proper case is not used. This will be addressed in a future release of Salt. ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import fnmatch import logging import os import re # Import Salt libs import salt.utils.pkg import salt.utils.platform import salt.utils.versions from salt.output import nested from salt.utils.functools import namespaced_function as _namespaced_function from salt.utils.odict import OrderedDict as _OrderedDict from salt.exceptions import ( CommandExecutionError, MinionError, SaltInvocationError ) from salt.modules.pkg_resource import _repack_pkgs # Import 3rd-party libs from salt.ext import six # pylint: disable=invalid-name _repack_pkgs = _namespaced_function(_repack_pkgs, globals()) if salt.utils.platform.is_windows(): # pylint: disable=import-error,no-name-in-module,unused-import from salt.ext.six.moves.urllib.parse import urlparse as _urlparse from salt.exceptions import SaltRenderError import collections import datetime import errno import time from functools import cmp_to_key # pylint: disable=import-error # pylint: enable=unused-import from salt.modules.win_pkg import _get_package_info from salt.modules.win_pkg import get_repo_data from salt.modules.win_pkg import _get_repo_details from salt.modules.win_pkg import _refresh_db_conditional from salt.modules.win_pkg import refresh_db from salt.modules.win_pkg import genrepo from salt.modules.win_pkg import _repo_process_pkg_sls from salt.modules.win_pkg import _get_latest_pkg_version from salt.modules.win_pkg import _reverse_cmp_pkg_versions _get_package_info = _namespaced_function(_get_package_info, globals()) get_repo_data = _namespaced_function(get_repo_data, globals()) _get_repo_details = \ _namespaced_function(_get_repo_details, globals()) 
_refresh_db_conditional = \ _namespaced_function(_refresh_db_conditional, globals()) refresh_db = _namespaced_function(refresh_db, globals()) genrepo = _namespaced_function(genrepo, globals()) _repo_process_pkg_sls = \ _namespaced_function(_repo_process_pkg_sls, globals()) _get_latest_pkg_version = \ _namespaced_function(_get_latest_pkg_version, globals()) _reverse_cmp_pkg_versions = \ _namespaced_function(_reverse_cmp_pkg_versions, globals()) # The following imports are used by the namespaced win_pkg funcs # and need to be included in their globals. # pylint: disable=import-error,unused-import from salt.utils.versions import LooseVersion # pylint: enable=import-error,unused-import # pylint: enable=invalid-name log = logging.getLogger(__name__) def __virtual__(): ''' Only make these states available if a pkg provider has been detected or assigned for this minion ''' return 'pkg.install' in __salt__ def _get_comparison_spec(pkgver): ''' Return a tuple containing the comparison operator and the version. If no comparison operator was passed, the comparison is assumed to be an "equals" comparison, and "==" will be the operator returned. ''' oper, verstr = salt.utils.pkg.split_comparison(pkgver.strip()) if oper in ('=', ''): oper = '==' return oper, verstr def _parse_version_string(version_conditions_string): ''' Returns a list of two-tuples containing (operator, version). ''' result = [] version_conditions_string = version_conditions_string.strip() if not version_conditions_string: return result for version_condition in version_conditions_string.split(','): operator_and_version = _get_comparison_spec(version_condition) result.append(operator_and_version) return result def _fulfills_version_string(installed_versions, version_conditions_string, ignore_epoch=False, allow_updates=False): ''' Returns True if any of the installed versions match the specified version conditions, otherwise returns False. 
installed_versions The installed versions version_conditions_string The string containing all version conditions. E.G. 1.2.3-4 >=1.2.3-4 >=1.2.3-4, <2.3.4-5 >=1.2.3-4, <2.3.4-5, !=1.2.4-1 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. allow_updates : False Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. (Only applicable if only one strict version condition is specified E.G. version: 2.0.6~ubuntu3) ''' version_conditions = _parse_version_string(version_conditions_string) for installed_version in installed_versions: fullfills_all = True for operator, version_string in version_conditions: if allow_updates and len(version_conditions) == 1 and operator == '==': operator = '>=' fullfills_all = fullfills_all and _fulfills_version_spec([installed_version], operator, version_string, ignore_epoch=ignore_epoch) if fullfills_all: return True return False def _fulfills_version_spec(versions, oper, desired_version, ignore_epoch=False): ''' Returns True if any of the installed versions match the specified version, otherwise returns False ''' cmp_func = __salt__.get('pkg.version_cmp') # stripping "with_origin" dict wrapper if salt.utils.platform.is_freebsd(): if isinstance(versions, dict) and 'version' in versions: versions = versions['version'] for ver in versions: if (oper == '==' and fnmatch.fnmatch(ver, desired_version)) \ or salt.utils.versions.compare(ver1=ver, oper=oper, ver2=desired_version, cmp_func=cmp_func, ignore_epoch=ignore_epoch): return True return False def _find_unpurge_targets(desired, **kwargs): ''' Find packages which are marked to be purged but can't yet be removed because they are dependencies for 
other installed packages. These are the packages which will need to be 'unpurged' because they are part of pkg.installed states. This really just applies to Debian-based Linuxes. ''' return [ x for x in desired if x in __salt__['pkg.list_pkgs'](purge_desired=True, **kwargs) ] def _find_download_targets(name=None, version=None, pkgs=None, normalize=True, skip_suggestions=False, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.downloaded and discover what packages need to be downloaded. Return a dict of packages to download. ''' cur_pkgs = __salt__['pkg.list_downloaded'](**kwargs) if pkgs: to_download = _repack_pkgs(pkgs, normalize=normalize) if not to_download: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. See ' 'minion log.'} else: if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_download = {_normalize_name(name): version} else: to_download = {name: version} cver = cur_pkgs.get(name, {}) if name in to_download: # Package already downloaded, no need to download again if cver and version in cver: return {'name': name, 'changes': {}, 'result': True, 'comment': 'Version {0} of package \'{1}\' is already ' 'downloaded'.format(version, name)} # if cver is not an empty string, the package is already downloaded elif cver and version is None: # The package is downloaded return {'name': name, 'changes': {}, 'result': True, 'comment': 'Package {0} is already ' 'downloaded'.format(name)} version_spec = False if not skip_suggestions: try: problems = _preflight_check(to_download, **kwargs) except CommandExecutionError: pass else: comments = [] if problems.get('no_suggest'): comments.append( 'The following package(s) were not found, and no ' 'possible matches were found in the package db: ' '{0}'.format( ', '.join(sorted(problems['no_suggest'])) ) ) if problems.get('suggest'): for pkgname, suggestions in \ six.iteritems(problems['suggest']): 
comments.append( 'Package \'{0}\' not found (possible matches: ' '{1})'.format(pkgname, ', '.join(suggestions)) ) if comments: if len(comments) > 1: comments.append('') return {'name': name, 'changes': {}, 'result': False, 'comment': '. '.join(comments).rstrip()} # Find out which packages will be targeted in the call to pkg.download # Check current downloaded versions against specified versions targets = {} problems = [] for pkgname, pkgver in six.iteritems(to_download): cver = cur_pkgs.get(pkgname, {}) # Package not yet downloaded, so add to targets if not cver: targets[pkgname] = pkgver continue # No version specified but package is already downloaded elif cver and not pkgver: continue version_spec = True try: if not _fulfills_version_string(cver.keys(), pkgver, ignore_epoch=ignore_epoch): targets[pkgname] = pkgver except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already downloaded msg = ( 'All specified packages{0} are already downloaded' .format(' (matching specified versions)' if version_spec else '') ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_advisory_targets(name=None, advisory_ids=None, **kwargs): ''' Inspect the arguments to pkg.patch_installed and discover what advisory patches need to be installed. Return a dict of advisory patches to install. 
''' cur_patches = __salt__['pkg.list_installed_patches'](**kwargs) if advisory_ids: to_download = advisory_ids else: to_download = [name] if cur_patches.get(name, {}): # Advisory patch already installed, no need to install it again return {'name': name, 'changes': {}, 'result': True, 'comment': 'Advisory patch {0} is already ' 'installed'.format(name)} # Find out which advisory patches will be targeted in the call to pkg.install targets = [] for patch_name in to_download: cver = cur_patches.get(patch_name, {}) # Advisory patch not yet installed, so add to targets if not cver: targets.append(patch_name) continue if not targets: # All specified packages are already downloaded msg = ('All specified advisory patches are already installed') return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_remove_targets(name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.removed and discover what packages need to be removed. Return a dict of packages to remove. ''' if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if pkgs: to_remove = _repack_pkgs(pkgs, normalize=normalize) if not to_remove: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. 
See ' 'minion log.'} else: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_remove = {_normalize_name(name): version} version_spec = False # Find out which packages will be targeted in the call to pkg.remove # Check current versions against specified versions targets = [] problems = [] for pkgname, pkgver in six.iteritems(to_remove): # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names origin = bool(re.search('/', pkgname)) if __grains__['os'] == 'FreeBSD' and origin: cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == pkgname] else: cver = cur_pkgs.get(pkgname, []) # Package not installed, no need to remove if not cver: continue # No version specified and pkg is installed elif __salt__['pkg_resource.version_clean'](pkgver) is None: targets.append(pkgname) continue version_spec = True try: if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch): targets.append(pkgname) else: log.debug( 'Current version (%s) did not match desired version ' 'specification (%s), will not remove', cver, pkgver ) except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already absent msg = 'All specified packages{0} are already absent'.format( ' (matching specified versions)' if version_spec else '' ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_install_targets(name=None, version=None, pkgs=None, sources=None, skip_suggestions=False, pkg_verify=False, normalize=True, ignore_epoch=False, reinstall=False, refresh=False, **kwargs): ''' Inspect the arguments to pkg.installed and discover what packages need to be installed. 
Return a dict of desired packages ''' was_refreshed = False if all((pkgs, sources)): return {'name': name, 'changes': {}, 'result': False, 'comment': 'Only one of "pkgs" and "sources" is permitted.'} # dict for packages that fail pkg.verify and their altered files altered_files = {} # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True if salt.utils.platform.is_windows(): # Windows requires a refresh to establish a pkg db if refresh=True, so # add it to the kwargs. 
kwargs['refresh'] = refresh resolve_capabilities = kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__ try: cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) cur_prov = resolve_capabilities and __salt__['pkg.list_provides'](**kwargs) or dict() except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': exc.strerror} if salt.utils.platform.is_windows() and kwargs.pop('refresh', False): # We already refreshed when we called pkg.list_pkgs was_refreshed = True refresh = False if any((pkgs, sources)): if pkgs: desired = _repack_pkgs(pkgs, normalize=normalize) elif sources: desired = __salt__['pkg_resource.pack_sources']( sources, normalize=normalize, ) if not desired: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted \'{0}\' parameter. See ' 'minion log.'.format('pkgs' if pkgs else 'sources')} to_unpurge = _find_unpurge_targets(desired, **kwargs) else: if salt.utils.platform.is_windows(): pkginfo = _get_package_info(name, saltenv=kwargs['saltenv']) if not pkginfo: return {'name': name, 'changes': {}, 'result': False, 'comment': 'Package {0} not found in the ' 'repository.'.format(name)} if version is None: version = _get_latest_pkg_version(pkginfo) if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) desired = {_normalize_name(name): version} else: desired = {name: version} to_unpurge = _find_unpurge_targets(desired, **kwargs) # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names origin = bool(re.search('/', name)) if __grains__['os'] == 'FreeBSD' and origin: cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == name] else: cver = cur_pkgs.get(name, []) if name not in to_unpurge: if version and version in cver \ and not reinstall \ and not pkg_verify: # The package is installed and is the correct version return {'name': name, 'changes': {}, 'result': True, 
'comment': 'Version {0} of package \'{1}\' is already ' 'installed'.format(version, name)} # if cver is not an empty string, the package is already installed elif cver and version is None \ and not reinstall \ and not pkg_verify: # The package is installed return {'name': name, 'changes': {}, 'result': True, 'comment': 'Package {0} is already ' 'installed'.format(name)} version_spec = False if not sources: # Check for alternate package names if strict processing is not # enforced. Takes extra time. Disable for improved performance if not skip_suggestions: # Perform platform-specific pre-flight checks not_installed = dict([ (name, version) for name, version in desired.items() if not (name in cur_pkgs and (version is None or _fulfills_version_string(cur_pkgs[name], version))) ]) if not_installed: try: problems = _preflight_check(not_installed, **kwargs) except CommandExecutionError: pass else: comments = [] if problems.get('no_suggest'): comments.append( 'The following package(s) were not found, and no ' 'possible matches were found in the package db: ' '{0}'.format( ', '.join(sorted(problems['no_suggest'])) ) ) if problems.get('suggest'): for pkgname, suggestions in \ six.iteritems(problems['suggest']): comments.append( 'Package \'{0}\' not found (possible matches: ' '{1})'.format(pkgname, ', '.join(suggestions)) ) if comments: if len(comments) > 1: comments.append('') return {'name': name, 'changes': {}, 'result': False, 'comment': '. '.join(comments).rstrip()} # Resolve the latest package version for any packages with "latest" in the # package version wants_latest = [] \ if sources \ else [x for x, y in six.iteritems(desired) if y == 'latest'] if wants_latest: resolved_latest = __salt__['pkg.latest_version'](*wants_latest, refresh=refresh, **kwargs) if len(wants_latest) == 1: resolved_latest = {wants_latest[0]: resolved_latest} if refresh: was_refreshed = True refresh = False # pkg.latest_version returns an empty string when the package is # up-to-date. 
So check the currently-installed packages. If found, the # resolved latest version will be the currently installed one from # cur_pkgs. If not found, then the package doesn't exist and the # resolved latest version will be None. for key in resolved_latest: if not resolved_latest[key]: if key in cur_pkgs: resolved_latest[key] = cur_pkgs[key][-1] else: resolved_latest[key] = None # Update the desired versions with the ones we resolved desired.update(resolved_latest) # Find out which packages will be targeted in the call to pkg.install targets = {} to_reinstall = {} problems = [] warnings = [] failed_verify = False for package_name, version_string in six.iteritems(desired): cver = cur_pkgs.get(package_name, []) if resolve_capabilities and not cver and package_name in cur_prov: cver = cur_pkgs.get(cur_prov.get(package_name)[0], []) # Package not yet installed, so add to targets if not cver: targets[package_name] = version_string continue if sources: if reinstall: to_reinstall[package_name] = version_string continue elif 'lowpkg.bin_pkg_info' not in __salt__: continue # Metadata parser is available, cache the file and derive the # package's name and version err = 'Unable to cache {0}: {1}' try: cached_path = __salt__['cp.cache_file'](version_string, saltenv=kwargs['saltenv']) except CommandExecutionError as exc: problems.append(err.format(version_string, exc)) continue if not cached_path: problems.append(err.format(version_string, 'file not found')) continue elif not os.path.exists(cached_path): problems.append('{0} does not exist on minion'.format(version_string)) continue source_info = __salt__['lowpkg.bin_pkg_info'](cached_path) if source_info is None: warnings.append('Failed to parse metadata for {0}'.format(version_string)) continue else: verstr = source_info['version'] else: verstr = version_string if reinstall: to_reinstall[package_name] = version_string continue if not __salt__['pkg_resource.check_extra_requirements'](package_name, version_string): 
targets[package_name] = version_string continue # No version specified and pkg is installed elif __salt__['pkg_resource.version_clean'](version_string) is None: if (not reinstall) and pkg_verify: try: verify_result = __salt__['pkg.verify']( package_name, ignore_types=ignore_types, verify_options=verify_options, **kwargs ) except (CommandExecutionError, SaltInvocationError) as exc: failed_verify = exc.strerror continue if verify_result: to_reinstall[package_name] = version_string altered_files[package_name] = verify_result continue version_fulfilled = False allow_updates = bool(not sources and kwargs.get('allow_updates')) try: version_fulfilled = _fulfills_version_string(cver, verstr, ignore_epoch=ignore_epoch, allow_updates=allow_updates) except CommandExecutionError as exc: problems.append(exc.strerror) continue # Compare desired version against installed version. version_spec = True if not version_fulfilled: if reinstall: to_reinstall[package_name] = version_string else: version_conditions = _parse_version_string(version_string) if pkg_verify and any(oper == '==' for oper, version in version_conditions): try: verify_result = __salt__['pkg.verify']( package_name, ignore_types=ignore_types, verify_options=verify_options, **kwargs ) except (CommandExecutionError, SaltInvocationError) as exc: failed_verify = exc.strerror continue if verify_result: to_reinstall[package_name] = version_string altered_files[package_name] = verify_result else: log.debug( 'Current version (%s) did not match desired version ' 'specification (%s), adding to installation targets', cver, version_string ) targets[package_name] = version_string if failed_verify: problems.append(failed_verify) if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not any((targets, to_unpurge, to_reinstall)): # All specified packages are installed msg = 'All specified packages are already installed{0}' msg = msg.format( ' and are at the desired version' if 
version_spec and not sources else '' ) ret = {'name': name, 'changes': {}, 'result': True, 'comment': msg} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret return (desired, targets, to_unpurge, to_reinstall, altered_files, warnings, was_refreshed) def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None): ''' Determine whether or not the installed packages match what was requested in the SLS file. ''' ok = [] failed = [] if not new_caps: new_caps = dict() for pkgname, pkgver in desired.items(): # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names. # Homebrew for Mac OSX does something similar with tap names # prefixing package names, separated with a slash. has_origin = '/' in pkgname if __grains__['os'] == 'FreeBSD' and has_origin: cver = [k for k, v in six.iteritems(new_pkgs) if v['origin'] == pkgname] elif __grains__['os'] == 'MacOS' and has_origin: cver = new_pkgs.get(pkgname, new_pkgs.get(pkgname.split('/')[-1])) elif __grains__['os'] == 'OpenBSD': cver = new_pkgs.get(pkgname.split('%')[0]) elif __grains__['os_family'] == 'Debian': cver = new_pkgs.get(pkgname.split('=')[0]) else: cver = new_pkgs.get(pkgname) if not cver and pkgname in new_caps: cver = new_pkgs.get(new_caps.get(pkgname)[0]) if not cver: failed.append(pkgname) continue elif pkgver == 'latest': ok.append(pkgname) continue elif not __salt__['pkg_resource.version_clean'](pkgver): ok.append(pkgname) continue elif pkgver.endswith("*") and cver[0].startswith(pkgver[:-1]): ok.append(pkgname) continue if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch): ok.append(pkgname) else: failed.append(pkgname) return ok, failed def _get_desired_pkg(name, desired): ''' Helper function that retrieves and nicely formats the desired pkg (and version if specified) so that helpful information can be printed in the comment for the state. 
''' if not desired[name] or desired[name].startswith(('<', '>', '=')): oper = '' else: oper = '=' return '{0}{1}{2}'.format(name, oper, '' if not desired[name] else desired[name]) def _preflight_check(desired, fromrepo, **kwargs): ''' Perform platform-specific checks on desired packages ''' if 'pkg.check_db' not in __salt__: return {} ret = {'suggest': {}, 'no_suggest': []} pkginfo = __salt__['pkg.check_db']( *list(desired.keys()), fromrepo=fromrepo, **kwargs ) for pkgname in pkginfo: if pkginfo[pkgname]['found'] is False: if pkginfo[pkgname]['suggestions']: ret['suggest'][pkgname] = pkginfo[pkgname]['suggestions'] else: ret['no_suggest'].append(pkgname) return ret def _nested_output(obj): ''' Serialize obj and format for output ''' nested.__opts__ = __opts__ ret = nested.output(obj).rstrip() return ret def installed( name, version=None, refresh=None, fromrepo=None, skip_verify=False, skip_suggestions=False, pkgs=None, sources=None, allow_updates=False, pkg_verify=False, normalize=True, ignore_epoch=False, reinstall=False, update_holds=False, bypass_file=None, bypass_file_contains=None, **kwargs): ''' Ensure that the package is installed, and that it is the correct version (if specified). :param str name: The name of the package to be installed. This parameter is ignored if either "pkgs" or "sources" is used. Additionally, please note that this option can only be used to install packages from a software repository. To install a package file manually, use the "sources" option detailed below. :param str version: Install a specific version of a package. This option is ignored if "sources" is used. Currently, this option is supported for the following pkg providers: :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`pkgin <salt.modules.pkgin>`, :mod:`win_pkg <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`. 
The version number includes the release designation where applicable, to allow Salt to target a specific release of a given version. When in doubt, using the ``pkg.latest_version`` function for an uninstalled package will tell you the version available. .. code-block:: bash # salt myminion pkg.latest_version vim-enhanced myminion: 2:7.4.160-1.el7 .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the ``pkg.latest_version`` output above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. Also, while this function is not yet implemented for all pkg frontends, :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` will show all versions available in the various repositories for a given package, irrespective of whether or not it is installed. .. code-block:: bash # salt myminion pkg.list_repo_pkgs bash myminion: ---------- bash: - 4.2.46-21.el7_3 - 4.2.46-20.el7_2 This function was first added for :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` in 2014.1.0, and was expanded to :py:func:`Debian/Ubuntu <salt.modules.aptpkg.list_repo_pkgs>` and :py:func:`Arch Linux <salt.modules.pacman.list_repo_pkgs>`-based distros in the 2017.7.0 release. The version strings returned by either of these functions can be used as version specifiers in pkg states. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. 
code-block:: yaml common_packages: pkg.installed: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 If the version given is the string ``latest``, the latest available package version will be installed à la ``pkg.latest``. **WILDCARD VERSIONS** As of the 2017.7.0 release, this state now supports wildcards in package versions for SUSE SLES/Leap/Tumbleweed, Debian/Ubuntu, RHEL/CentOS, Arch Linux, and their derivatives. Using wildcards can be useful for packages where the release name is built into the version in some way, such as for RHEL/CentOS which typically has version numbers like ``1.2.34-5.el7``. An example of the usage for this would be: .. code-block:: yaml mypkg: pkg.installed: - version: '1.2.34*' Keep in mind that using wildcard versions will result in a slower state run since Salt must gather the available versions of the specified packages and figure out which of them match the specified wildcard expression. :param bool refresh: This parameter controls whether or not the package repo database is updated prior to installing the requested package(s). If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before installing. If ``False``, the package database will *not* be refreshed before installing. If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. 
code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - cache_valid_time: 300 - allow_updates: True - hold: False In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param str fromrepo: Specify a repository from which to install .. note:: Distros which use APT (Debian, Ubuntu, etc.) do not have a concept of repositories, in the same way as YUM-based distros do. When a source is added, it is assigned to a given release. Consider the following source configuration: .. code-block:: text deb http://ppa.launchpad.net/saltstack/salt/ubuntu precise main The packages provided by this source would be made available via the ``precise`` release, therefore ``fromrepo`` would need to be set to ``precise`` for Salt to install the package from this source. Having multiple sources in the same release may result in the default install candidate being newer than what is desired. If this is the case, the desired version must be specified using the ``version`` parameter. If the ``pkgs`` parameter is being used to install multiple packages in the same state, then instead of using ``version``, use the method of version specification described in the **Multiple Package Installation Options** section below. Running the shell command ``apt-cache policy pkgname`` on a minion can help elucidate the APT configuration and aid in properly configuring states: .. 
code-block:: bash root@saltmaster:~# salt ubuntu01 cmd.run 'apt-cache policy ffmpeg' ubuntu01: ffmpeg: Installed: (none) Candidate: 7:0.10.11-1~precise1 Version table: 7:0.10.11-1~precise1 0 500 http://ppa.launchpad.net/jon-severinsson/ffmpeg/ubuntu/ precise/main amd64 Packages 4:0.8.10-0ubuntu0.12.04.1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise-updates/main amd64 Packages 500 http://security.ubuntu.com/ubuntu/ precise-security/main amd64 Packages 4:0.8.1-0ubuntu1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise/main amd64 Packages The release is located directly after the source's URL. The actual release name is the part before the slash, so to install version **4:0.8.10-0ubuntu0.12.04.1** either ``precise-updates`` or ``precise-security`` could be used for the ``fromrepo`` value. :param bool skip_verify: Skip the GPG verification check for the package to be installed :param bool skip_suggestions: Force strict package naming. Disables lookup of package alternatives. .. versionadded:: 2014.1.1 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 :param bool allow_updates: Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - allow_updates: True - hold: False :param bool pkg_verify: .. versionadded:: 2014.7.0 For requested packages that are already installed and would not be targeted for upgrade or downgrade, use pkg.verify to determine if any of the files installed by the package have been altered. 
If files have been altered, the reinstall option of pkg.install is used to force a reinstall. Types to ignore can be passed to pkg.verify. Additionally, ``verify_options`` can be used to modify further the behavior of pkg.verify. See examples below. Currently, this option is supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>`. Examples: .. code-block:: yaml httpd: pkg.installed: - version: 2.2.15-30.el6.centos - pkg_verify: True .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc - verify_options: - nodeps - nofiledigest :param list ignore_types: List of types to ignore when verifying the package .. versionadded:: 2014.7.0 :param list verify_options: List of additional options to pass when verifying the package. These options will be added to the ``rpm -V`` command, prepended with ``--`` (for example, when ``nodeps`` is passed in this option, ``rpm -V`` will be run with ``--nodeps``). .. versionadded:: 2016.11.0 :param bool normalize: Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml gpfs.gplbin-2.6.32-279.31.1.el6.x86_64: pkg.installed: - normalize: False :param bool ignore_epoch: When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. 
code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.installed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the package would be installed, but the state would report as failed because the actual installed version would be ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 | **MULTIPLE PACKAGE INSTALLATION OPTIONS: (not supported in pkgng)** :param list pkgs: A list of packages to install from a software repository. All packages listed under ``pkgs`` will be installed via a single command. .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar - baz - hold: True ``NOTE:`` For :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`winrepo <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`, version numbers can be specified in the ``pkgs`` argument. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz Additionally, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`zypper <salt.modules.zypper>`, :mod:`yum/dnf <salt.modules.yumpkg>`, and :mod:`apt <salt.modules.aptpkg>` support the ``<``, ``<=``, ``>=``, and ``>`` operators for more control over what versions will be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: '>=1.2.3-4' - baz ``NOTE:`` When using comparison operators, the expression must be enclosed in quotes to avoid a YAML render error. 
With :mod:`ebuild <salt.modules.ebuild>` is also possible to specify a use flag list and/or if the given packages should be in package.accept_keywords file and/or the overlay from which you want the package to be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo: '~' - bar: '~>=1.2:slot::overlay[use,-otheruse]' - baz :param list sources: A list of packages to install, along with the source URI or local path from which to install each package. In the example below, ``foo``, ``bar``, ``baz``, etc. refer to the name of the package, as it would appear in the output of the ``pkg.version`` or ``pkg.list_pkgs`` salt CLI commands. .. code-block:: yaml mypkgs: pkg.installed: - sources: - foo: salt://rpms/foo.rpm - bar: http://somesite.org/bar.rpm - baz: ftp://someothersite.org/baz.rpm - qux: /minion/path/to/qux.rpm **PLATFORM-SPECIFIC ARGUMENTS** These are specific to each OS. If it does not apply to the execution module for your OS, it is ignored. :param bool hold: Force the package to be held at the current installed version. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2014.7.0 :param bool update_holds: If ``True``, and this function would update the package version, any packages which are being held will be temporarily unheld so that they can be updated. Otherwise, if this function attempts to update a held package, the held package(s) will be skipped and the state will fail. By default, this parameter is set to ``False``. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2016.11.0 :param list names: A list of packages to install from a software repository. Each package will be installed individually by the package manager. .. warning:: Unlike ``pkgs``, the ``names`` parameter cannot specify a version. In addition, it makes a separate call to the package management frontend to install each package, whereas ``pkgs`` makes just a single call. 
It is therefore recommended to use ``pkgs`` instead of ``names`` to install multiple packages, both for the additional features and the performance improvement that it brings. :param bool install_recommends: Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - install_recommends: False :param bool only_upgrade: Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. :param bool report_reboot_exit_codes: If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. code-block:: yaml ms vcpp installed: pkg.installed: - name: ms-vcpp - version: 10.0.40219 - report_reboot_exit_codes: False :param str bypass_file: If you wish to bypass the full package validation process, you can specify a file related to the installed pacakge as a way to validate the pacakge has already been installed. A good example would be a config file that is deployed with the package. Another bypass_file could be ``/run/salt-minon.pid``. .. code-block:: yaml install_ntp: pkg.installed: - name: ntp - bypass_file: /etc/ntp.conf The use case for this feature is when running salt at significant scale. 
Each state that has a requisite for a ``pkg.installed`` will have salt querying the package manager of the system. Compared to simple diff checks, querying the pacakge manager is a lengthy process. This feature is an attempt to reduce the run time of states. If only a config change is being made but you wish to keep all of the self resolving requisites this bypasses the lenghty cost of the package manager. The assumption is that if this file is present, the package should already be installed. :param str bypass_file_contains: This option can only be used in conjunction with the ``bypass_file`` option. It is to provide a second layer of validation before bypassing the ``pkg.installed`` process. .. code-block:: yaml install_ntp: pkg.installed: - name: ntp - bypass_file: /etc/ntp.conf - bypass_file_contains: version-20181218 The will have salt check to see if the file contains the specified string. If the value is found, the ``pkg.installed`` process will be bypassed under the assumption that two pieces of validation have passed and the package is already installed. .. warning:: Do not try and use ``{{ salt['pkg.version']('ntp') }}`` in a jinja template as part of your bypass_file_contains match. This will trigger a ``pkg.version`` lookup with the pacakge manager and negate any time saved by trying to use the bypass feature. :return: A dictionary containing the state of the software installation :rtype dict: .. note:: The ``pkg.installed`` state supports the usage of ``reload_modules``. This functionality allows you to force Salt to reload all modules. In many cases, Salt is clever enough to transparently reload the modules. For example, if you install a package, Salt reloads modules because some other module or state might require the package which was installed. However, there are some edge cases where this may not be the case, which is what ``reload_modules`` is meant to resolve. 
You should only use ``reload_modules`` if your ``pkg.installed`` does some sort of installation where if you do not reload the modules future items in your state which rely on the software being installed will fail. Please see the :ref:`Reloading Modules <reloading-modules>` documentation for more information. ''' if not pkgs and isinstance(pkgs, list): return {'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided'} # If just a name (and optionally a version) is passed, just pack them into # the pkgs argument. if name and not any((pkgs, sources)): if version: pkgs = [{name: version}] version = None else: pkgs = [name] kwargs['saltenv'] = __env__ refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if bypass_file is not None and bypass_file_contains is not None: if os.path.isfile(bypass_file): with salt.utils.fopen(bypass_file) as bypass_file_open: if bypass_file_contains in bypass_file_open.read(): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as {} was present in {}'.format(bypass_file_contains, bypass_file)} if bypass_file is not None and bypass_file_contains is None: if os.path.isfile(bypass_file): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as bypass_file {} was present'.format(bypass_file)} # check if capabilities should be checked and modify the requested packages # accordingly. 
if pkgs: pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) if not isinstance(pkg_verify, list): pkg_verify = pkg_verify is True if (pkg_verify or isinstance(pkg_verify, list)) \ and 'pkg.verify' not in __salt__: return {'name': name, 'changes': {}, 'result': False, 'comment': 'pkg.verify not implemented'} if not isinstance(version, six.string_types) and version is not None: version = six.text_type(version) kwargs['allow_updates'] = allow_updates result = _find_install_targets(name, version, pkgs, sources, fromrepo=fromrepo, skip_suggestions=skip_suggestions, pkg_verify=pkg_verify, normalize=normalize, ignore_epoch=ignore_epoch, reinstall=reinstall, refresh=refresh, **kwargs) try: (desired, targets, to_unpurge, to_reinstall, altered_files, warnings, was_refreshed) = result if was_refreshed: refresh = False except ValueError: # _find_install_targets() found no targets or encountered an error # check that the hold function is available if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( name=name, pkgs=pkgs, sources=sources ) except (CommandExecutionError, SaltInvocationError) as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': six.text_type(exc)} if 'result' in hold_ret and not hold_ret['result']: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] for i in modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] result['changes'][i['name']] = i['changes'] for i in not_modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) 
result['result'] = i['result'] for i in failed_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] return result if to_unpurge and 'lowpkg.unpurge' not in __salt__: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'lowpkg.unpurge not implemented'} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret # Remove any targets not returned by _find_install_targets if pkgs: pkgs = [dict([(x, y)]) for x, y in six.iteritems(targets)] pkgs.extend([dict([(x, y)]) for x, y in six.iteritems(to_reinstall)]) elif sources: oldsources = sources sources = [x for x in oldsources if next(iter(list(x.keys()))) in targets] sources.extend([x for x in oldsources if next(iter(list(x.keys()))) in to_reinstall]) comment = [] if __opts__['test']: if targets: if sources: summary = ', '.join(targets) else: summary = ', '.join([_get_desired_pkg(x, targets) for x in targets]) comment.append('The following packages would be ' 'installed/updated: {0}'.format(summary)) if to_unpurge: comment.append( 'The following packages would have their selection status ' 'changed from \'purge\' to \'install\': {0}' .format(', '.join(to_unpurge)) ) if to_reinstall: # Add a comment for each package in to_reinstall with its # pkg.verify output if reinstall: reinstall_targets = [] for reinstall_pkg in to_reinstall: if sources: reinstall_targets.append(reinstall_pkg) else: reinstall_targets.append( _get_desired_pkg(reinstall_pkg, to_reinstall) ) msg = 'The following packages would be reinstalled: ' msg += ', '.join(reinstall_targets) comment.append(msg) else: for reinstall_pkg in to_reinstall: if sources: pkgstr = reinstall_pkg else: pkgstr = _get_desired_pkg(reinstall_pkg, to_reinstall) comment.append( 'Package \'{0}\' would be reinstalled because the ' 'following files have been altered:'.format(pkgstr) ) comment.append( _nested_output(altered_files[reinstall_pkg]) ) ret = {'name': name, 'changes': {}, 'result': None, 'comment': 
'\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret changes = {'installed': {}} modified_hold = None not_modified_hold = None failed_hold = None if targets or to_reinstall: try: pkg_ret = __salt__['pkg.install'](name=None, refresh=refresh, version=version, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=pkgs, sources=sources, reinstall=bool(to_reinstall), normalize=normalize, update_holds=update_holds, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while installing ' 'package(s): {0}'.format(exc)) if warnings: ret.setdefault('warnings', []).extend(warnings) return ret if refresh: refresh = False if isinstance(pkg_ret, dict): changes['installed'].update(pkg_ret) elif isinstance(pkg_ret, six.string_types): comment.append(pkg_ret) # Code below will be looking for a dictionary. If this is a string # it means that there was an exception raised and that no packages # changed, so now that we have added this error to the comments we # set this to an empty dictionary so that the code below which # checks reinstall targets works. 
pkg_ret = {} if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( name=name, pkgs=desired ) except (CommandExecutionError, SaltInvocationError) as exc: comment.append(six.text_type(exc)) ret = {'name': name, 'changes': changes, 'result': False, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: if 'result' in hold_ret and not hold_ret['result']: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] if to_unpurge: changes['purge_desired'] = __salt__['lowpkg.unpurge'](*to_unpurge) # Analyze pkg.install results for packages in targets if sources: modified = [x for x in changes['installed'] if x in targets] not_modified = [x for x in desired if x not in targets and x not in to_reinstall] failed = [x for x in targets if x not in modified] else: if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True new_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__: new_caps = __salt__['pkg.list_provides'](**kwargs) else: new_caps = {} ok, failed = _verify_install(desired, new_pkgs, ignore_epoch=ignore_epoch, new_caps=new_caps) modified = [x for x in ok if x in targets] not_modified = [x for x in ok if x not in targets and x not in to_reinstall] failed = [x for x in failed if x in targets] # If there was nothing unpurged, just set the changes dict to the contents # of changes['installed']. 
if not changes.get('purge_desired'): changes = changes['installed'] if modified: if sources: summary = ', '.join(modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in modified]) if len(summary) < 20: comment.append('The following packages were installed/updated: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} installed/updated.'.format( len(modified), 's' if len(modified) > 1 else '', 'were' if len(modified) > 1 else 'was' ) ) if modified_hold: for i in modified_hold: change_name = i['name'] if change_name in changes: comment.append(i['comment']) if changes[change_name]['new']: changes[change_name]['new'] += '\n' changes[change_name]['new'] += '{0}'.format(i['changes']['new']) if changes[change_name]['old']: changes[change_name]['old'] += '\n' changes[change_name]['old'] += '{0}'.format(i['changes']['old']) else: comment.append(i['comment']) changes[change_name] = {} changes[change_name]['new'] = '{0}'.format(i['changes']['new']) # Any requested packages that were not targeted for install or reinstall if not_modified: if sources: summary = ', '.join(not_modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in not_modified]) if len(not_modified) <= 20: comment.append('The following packages were already installed: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} already installed'.format( len(not_modified), 's' if len(not_modified) > 1 else '', 'were' if len(not_modified) > 1 else 'was' ) ) if not_modified_hold: for i in not_modified_hold: comment.append(i['comment']) result = True if failed: if sources: summary = ', '.join(failed) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in failed]) comment.insert(0, 'The following packages failed to ' 'install/update: {0}'.format(summary)) result = False if failed_hold: for i in failed_hold: comment.append(i['comment']) result = False # Get the ignore_types list if any from the pkg_verify argument if 
isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] # Rerun pkg.verify for packages in to_reinstall to determine failed modified = [] failed = [] for reinstall_pkg in to_reinstall: if reinstall: if reinstall_pkg in pkg_ret: modified.append(reinstall_pkg) else: failed.append(reinstall_pkg) elif pkg_verify: # No need to wrap this in a try/except because we would already # have caught invalid arguments earlier. verify_result = __salt__['pkg.verify'](reinstall_pkg, ignore_types=ignore_types, verify_options=verify_options, **kwargs) if verify_result: failed.append(reinstall_pkg) altered_files[reinstall_pkg] = verify_result else: modified.append(reinstall_pkg) if modified: # Add a comment for each package in modified with its pkg.verify output for modified_pkg in modified: if sources: pkgstr = modified_pkg else: pkgstr = _get_desired_pkg(modified_pkg, desired) msg = 'Package {0} was reinstalled.'.format(pkgstr) if modified_pkg in altered_files: msg += ' The following files were remediated:' comment.append(msg) comment.append(_nested_output(altered_files[modified_pkg])) else: comment.append(msg) if failed: # Add a comment for each package in failed with its pkg.verify output for failed_pkg in failed: if sources: pkgstr = failed_pkg else: pkgstr = _get_desired_pkg(failed_pkg, desired) msg = ('Reinstall was not successful for package {0}.' 
                   .format(pkgstr))
            if failed_pkg in altered_files:
                msg += ' The following files could not be remediated:'
                comment.append(msg)
                comment.append(_nested_output(altered_files[failed_pkg]))
            else:
                comment.append(msg)
        result = False

    ret = {'name': name,
           'changes': changes,
           'result': result,
           'comment': '\n'.join(comment)}
    if warnings:
        ret.setdefault('warnings', []).extend(warnings)
    return ret


def downloaded(name,
               version=None,
               pkgs=None,
               fromrepo=None,
               ignore_epoch=None,
               **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that the package is downloaded, and that it is the correct version
    (if specified).

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    :param str name:
        The name of the package to be downloaded. This parameter is ignored if
        either "pkgs" is used. Additionally, please note that this option can
        only be used to download packages from a software repository.

    :param str version:
        Download a specific version of a package.

        .. important::
            As of version 2015.8.7, for distros which use yum/dnf, packages
            which have a version with a nonzero epoch (that is, versions which
            start with a number followed by a colon must have the epoch
            included when specifying the version number. For example:

            .. code-block:: yaml

                vim-enhanced:
                  pkg.downloaded:
                    - version: 2:7.4.160-1.el7

            An **ignore_epoch** argument has been added to which causes the
            epoch to be disregarded when the state checks to see if the
            desired version was installed.

            You can install a specific version when using the ``pkgs``
            argument by including the version after the package:

            .. code-block:: yaml

                common_packages:
                  pkg.downloaded:
                    - pkgs:
                      - unzip
                      - dos2unix
                      - salt-minion: 2015.8.5-1.el6

    :param bool resolve_capabilities:
        Turn on resolving capabilities. This allow one to name "provides" or
        alias names for packages.

        .. versionadded:: 2018.3.0

    CLI Example:

    .. code-block:: yaml

        zsh:
          pkg.downloaded:
            - version: 5.0.5-4.63
            - fromrepo: "myrepository"
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # Bail out early on platforms whose pkg provider cannot report what has
    # already been downloaded (only yumpkg/zypper implement this).
    if 'pkg.list_downloaded' not in __salt__:
        ret['result'] = False
        ret['comment'] = 'The pkg.downloaded state is not available on ' \
                         'this platform'
        return ret

    # An explicitly empty pkgs list means there is nothing to do.
    if not pkgs and isinstance(pkgs, list):
        ret['result'] = True
        ret['comment'] = 'No packages to download provided'
        return ret

    # If just a name (and optionally a version) is passed, just pack them into
    # the pkgs argument.
    if name and not pkgs:
        if version:
            pkgs = [{name: version}]
            version = None
        else:
            pkgs = [name]

    # It doesn't make sense here to received 'downloadonly' as kwargs
    # as we're explicitly passing 'downloadonly=True' to execution module.
    if 'downloadonly' in kwargs:
        del kwargs['downloadonly']

    pkgs, _refresh = _resolve_capabilities(pkgs, **kwargs)

    # Only downloading not yet downloaded packages
    targets = _find_download_targets(name,
                                     version,
                                     pkgs,
                                     fromrepo=fromrepo,
                                     ignore_epoch=ignore_epoch,
                                     **kwargs)
    # _find_download_targets returns a complete state dict when there is
    # nothing to do (or on error); pass that straight through to the caller.
    if isinstance(targets, dict) and 'result' in targets:
        return targets
    elif not isinstance(targets, dict):
        ret['result'] = False
        ret['comment'] = 'An error was encountered while checking targets: ' \
                         '{0}'.format(targets)
        return ret

    if __opts__['test']:
        summary = ', '.join(targets)
        ret['comment'] = 'The following packages would be ' \
                         'downloaded: {0}'.format(summary)
        return ret

    try:
        pkg_ret = __salt__['pkg.install'](name=name,
                                          pkgs=pkgs,
                                          version=version,
                                          downloadonly=True,
                                          fromrepo=fromrepo,
                                          ignore_epoch=ignore_epoch,
                                          **kwargs)
        ret['result'] = True
        ret['changes'].update(pkg_ret)
    except CommandExecutionError as exc:
        ret = {'name': name, 'result': False}
        if exc.info:
            # Get information for state return from the exception.
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = 'An error was encountered while downloading ' \
                             'package(s): {0}'.format(exc)
        return ret

    # Re-query the provider and verify each target actually landed on disk.
    new_pkgs = __salt__['pkg.list_downloaded'](**kwargs)
    ok, failed = _verify_install(targets, new_pkgs, ignore_epoch=ignore_epoch)

    if failed:
        summary = ', '.join([_get_desired_pkg(x, targets)
                             for x in failed])
        ret['result'] = False
        ret['comment'] = 'The following packages failed to ' \
                         'download: {0}'.format(summary)

    if not ret['changes'] and not ret['comment']:
        ret['result'] = True
        ret['comment'] = 'Packages are already downloaded: ' \
                         '{0}'.format(', '.join(targets))

    return ret


def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that packages related to certain advisory ids are installed.

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    CLI Example:

    .. code-block:: yaml

        issue-foo-fixed:
          pkg.patch_installed:
            - advisory_ids:
              - SUSE-SLE-SERVER-12-SP2-2017-185
              - SUSE-SLE-SERVER-12-SP2-2017-150
              - SUSE-SLE-SERVER-12-SP2-2017-120
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # Advisory/patch support exists only where pkg.list_patches is provided
    # (yumpkg and zypper).
    if 'pkg.list_patches' not in __salt__:
        ret['result'] = False
        ret['comment'] = 'The pkg.patch_installed state is not available on ' \
                         'this platform'
        return ret

    # An explicitly empty advisory_ids list means there is nothing to do.
    if not advisory_ids and isinstance(advisory_ids, list):
        ret['result'] = True
        ret['comment'] = 'No advisory ids provided'
        return ret

    # Only downloading not yet downloaded packages
    targets = _find_advisory_targets(name, advisory_ids, **kwargs)
    # _find_advisory_targets returns a complete state dict when there is
    # nothing to do (or on error); pass that straight through to the caller.
    if isinstance(targets, dict) and 'result' in targets:
        return targets
    elif not isinstance(targets, list):
        ret['result'] = False
        ret['comment'] = 'An error was encountered while checking targets: ' \
                         '{0}'.format(targets)
        return ret

    if __opts__['test']:
        summary = ', '.join(targets)
        ret['comment'] = 'The following advisory patches would be ' \
                         'downloaded: {0}'.format(summary)
        return ret

    try:
        pkg_ret = __salt__['pkg.install'](name=name,
                                          advisory_ids=advisory_ids,
                                          downloadonly=downloadonly,
                                          **kwargs)
        ret['result'] = True
        ret['changes'].update(pkg_ret)
    except CommandExecutionError as exc:
        ret = {'name': name, 'result': False}
        if exc.info:
            # Get information for state return from the exception.
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = ('An error was encountered while downloading '
                              'package(s): {0}'.format(exc))
        return ret

    if not ret['changes'] and not ret['comment']:
        # Word the comment to match whether this was a download-only run
        # (e.g. when called via patch_downloaded) or a real install.
        status = 'downloaded' if downloadonly else 'installed'
        ret['result'] = True
        ret['comment'] = ('Advisory patch is not needed or related packages '
                          'are already {0}'.format(status))

    return ret


def patch_downloaded(name, advisory_ids=None, **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that packages related to certain advisory ids are downloaded.

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    CLI Example:

    .. code-block:: yaml

        preparing-to-fix-issues:
          pkg.patch_downloaded:
            - advisory_ids:
              - SUSE-SLE-SERVER-12-SP2-2017-185
              - SUSE-SLE-SERVER-12-SP2-2017-150
              - SUSE-SLE-SERVER-12-SP2-2017-120
    '''
    # Advisory/patch support exists only where pkg.list_patches is provided
    # (yumpkg and zypper).
    if 'pkg.list_patches' not in __salt__:
        return {'name': name,
                'result': False,
                'changes': {},
                'comment': 'The pkg.patch_downloaded state is not available on '
                           'this platform'}

    # It doesn't make sense here to received 'downloadonly' as kwargs
    # as we're explicitly passing 'downloadonly=True' to execution module.
    if 'downloadonly' in kwargs:
        del kwargs['downloadonly']
    # Delegate to patch_installed, which handles targeting, test mode, and
    # error reporting; downloadonly=True keeps this to a download.
    return patch_installed(name=name,
                           advisory_ids=advisory_ids,
                           downloadonly=True,
                           **kwargs)


def latest(
        name,
        refresh=None,
        fromrepo=None,
        skip_verify=False,
        pkgs=None,
        watch_flags=True,
        **kwargs):
    '''
    Ensure that the named package is installed and the latest available
    package. If the package can be updated, this state function will update
    the package. Generally it is better for the
    :mod:`installed <salt.states.pkg.installed>` function to be used, as
    :mod:`latest <salt.states.pkg.latest>` will update the package whenever a
    new package is available.

    name
        The name of the package to maintain at the latest available version.
        This parameter is ignored if "pkgs" is used.

    fromrepo
        Specify a repository from which to install

    skip_verify
        Skip the GPG verification check for the package to be installed

    refresh
        This parameter controls whether or not the package repo database is
        updated prior to checking for the latest available version of the
        requested packages.

        If ``True``, the package database will be refreshed (``apt-get
        update`` or equivalent, depending on platform) before checking for
        the latest available version of the requested packages.

        If ``False``, the package database will *not* be refreshed before
        checking.
If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.latest: - refresh: True - cache_valid_time: 300 In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 Multiple Package Installation Options: (Not yet supported for: FreeBSD, OpenBSD, MacOS, and Solaris pkgutil) pkgs A list of packages to maintain at the latest available version. .. code-block:: yaml mypkgs: pkg.latest: - pkgs: - foo - bar - baz install_recommends Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - install_recommends: False only_upgrade Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. 
report_reboot_exit_codes If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. code-block:: yaml ms vcpp installed: pkg.latest: - name: ms-vcpp - report_reboot_exit_codes: False ''' refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if kwargs.get('sources'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'The "sources" parameter is not supported.'} elif pkgs: desired_pkgs = list(_repack_pkgs(pkgs).keys()) if not desired_pkgs: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted "pkgs" parameter. See ' 'minion log.'} else: if not pkgs and isinstance(pkgs, list): return { 'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided' } else: desired_pkgs = [name] kwargs['saltenv'] = __env__ # check if capabilities should be checked and modify the requested packages # accordingly. 
desired_pkgs, refresh = _resolve_capabilities(desired_pkgs, refresh=refresh, **kwargs) try: avail = __salt__['pkg.latest_version'](*desired_pkgs, fromrepo=fromrepo, refresh=refresh, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking the ' 'newest available version of package(s): {0}' .format(exc)} try: cur = __salt__['pkg.version'](*desired_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': exc.strerror} # Repack the cur/avail data if only a single package is being checked if isinstance(cur, six.string_types): cur = {desired_pkgs[0]: cur} if isinstance(avail, six.string_types): avail = {desired_pkgs[0]: avail} targets = {} problems = [] for pkg in desired_pkgs: if not avail.get(pkg): # Package either a) is up-to-date, or b) does not exist if not cur.get(pkg): # Package does not exist msg = 'No information found for \'{0}\'.'.format(pkg) log.error(msg) problems.append(msg) elif watch_flags \ and __grains__.get('os_family') == 'Gentoo' \ and __salt__['portage_config.is_changed_uses'](pkg): # Package is up-to-date, but Gentoo USE flags are changing so # we need to add it to the targets targets[pkg] = cur[pkg] else: # Package either a) is not installed, or b) is installed and has an # upgrade available targets[pkg] = avail[pkg] if problems: return { 'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems) } if targets: # Find up-to-date packages if not pkgs: # There couldn't have been any up-to-date packages if this state # only targeted a single package and is being allowed to proceed to # the install step. 
up_to_date = [] else: up_to_date = [x for x in pkgs if x not in targets] if __opts__['test']: comments = [] comments.append( 'The following packages would be installed/upgraded: ' + ', '.join(sorted(targets)) ) if up_to_date: up_to_date_count = len(up_to_date) if up_to_date_count <= 10: comments.append( 'The following packages are already up-to-date: ' + ', '.join( ['{0} ({1})'.format(x, cur[x]) for x in sorted(up_to_date)] ) ) else: comments.append( '{0} packages are already up-to-date' .format(up_to_date_count) ) return {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comments)} if salt.utils.platform.is_windows(): # pkg.install execution module on windows ensures the software # package is installed when no version is specified, it does not # upgrade the software to the latest. This is per the design. # Build updated list of pkgs *with verion number*, exclude # non-targeted ones targeted_pkgs = [{x: targets[x]} for x in targets] else: # Build updated list of pkgs to exclude non-targeted ones targeted_pkgs = list(targets) # No need to refresh, if a refresh was necessary it would have been # performed above when pkg.latest_version was run. 
try: changes = __salt__['pkg.install'](name=None, refresh=False, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=targeted_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while installing ' 'package(s): {0}'.format(exc)} if changes: # Find failed and successful updates failed = [x for x in targets if not changes.get(x) or changes[x].get('new') != targets[x] and targets[x] != 'latest'] successful = [x for x in targets if x not in failed] comments = [] if failed: msg = 'The following packages failed to update: ' \ '{0}'.format(', '.join(sorted(failed))) comments.append(msg) if successful: msg = 'The following packages were successfully ' \ 'installed/upgraded: ' \ '{0}'.format(', '.join(sorted(successful))) comments.append(msg) if up_to_date: if len(up_to_date) <= 10: msg = 'The following packages were already up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: msg = '{0} packages were already up-to-date '.format( len(up_to_date)) comments.append(msg) return {'name': name, 'changes': changes, 'result': False if failed else True, 'comment': ' '.join(comments)} else: if len(targets) > 10: comment = ('{0} targeted packages failed to update. ' 'See debug log for details.'.format(len(targets))) elif len(targets) > 1: comment = ('The following targeted packages failed to update. ' 'See debug log for details: ({0}).' 
.format(', '.join(sorted(targets)))) else: comment = 'Package {0} failed to ' \ 'update.'.format(next(iter(list(targets.keys())))) if up_to_date: if len(up_to_date) <= 10: comment += ' The following packages were already ' \ 'up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: comment += '{0} packages were already ' \ 'up-to-date'.format(len(up_to_date)) return {'name': name, 'changes': changes, 'result': False, 'comment': comment} else: if len(desired_pkgs) > 10: comment = 'All {0} packages are up-to-date.'.format( len(desired_pkgs)) elif len(desired_pkgs) > 1: comment = 'All packages are up-to-date ' \ '({0}).'.format(', '.join(sorted(desired_pkgs))) else: comment = 'Package {0} is already ' \ 'up-to-date'.format(desired_pkgs[0]) return {'name': name, 'changes': {}, 'result': True, 'comment': comment} def _uninstall( action='remove', name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Common function for package removal ''' if action not in ('remove', 'purge'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalid action \'{0}\'. 
' 'This is probably a bug.'.format(action)} try: pkg_params = __salt__['pkg_resource.parse_targets']( name, pkgs, normalize=normalize)[0] except MinionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while parsing targets: ' '{0}'.format(exc)} targets = _find_remove_targets(name, version, pkgs, normalize, ignore_epoch=ignore_epoch, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, list): return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking targets: ' '{0}'.format(targets)} if action == 'purge': old_removed = __salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) targets.extend([x for x in pkg_params if x in old_removed]) targets.sort() if not targets: return {'name': name, 'changes': {}, 'result': True, 'comment': 'None of the targeted packages are installed' '{0}'.format(' or partially installed' if action == 'purge' else '')} if __opts__['test']: return {'name': name, 'changes': {}, 'result': None, 'comment': 'The following packages will be {0}d: ' '{1}.'.format(action, ', '.join(targets))} changes = __salt__['pkg.{0}'.format(action)](name, pkgs=pkgs, version=version, **kwargs) new = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) failed = [] for x in pkg_params: if __grains__['os_family'] in ['Suse', 'RedHat']: # Check if the package version set to be removed is actually removed: if x in new and not pkg_params[x]: failed.append(x) elif x in new and pkg_params[x] in new[x]: failed.append(x + "-" + pkg_params[x]) elif x in new: failed.append(x) if action == 'purge': new_removed = __salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) failed.extend([x for x in pkg_params if x in new_removed]) failed.sort() if failed: return {'name': name, 'changes': changes, 'result': False, 'comment': 'The following packages failed to {0}: ' '{1}.'.format(action, ', 
'.join(failed))} comments = [] not_installed = sorted([x for x in pkg_params if x not in targets]) if not_installed: comments.append('The following packages were not installed: ' '{0}'.format(', '.join(not_installed))) comments.append('The following packages were {0}d: ' '{1}.'.format(action, ', '.join(targets))) else: comments.append('All targeted packages were {0}d.'.format(action)) return {'name': name, 'changes': changes, 'result': True, 'comment': ' '.join(comments)} def removed(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.remove`` if necessary to remove the package. name The name of the package to be removed. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.removed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to remove. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='remove', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while removing ' 'package(s): {0}'.format(exc)) return ret def purged(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.purge`` if necessary to purge the package. 
All configuration files are also removed. name The name of the package to be purged. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. 
code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.purged: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to purge. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='purge', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while purging ' 'package(s): {0}'.format(exc)) return ret def uptodate(name, refresh=False, pkgs=None, **kwargs): ''' .. versionadded:: 2014.7.0 .. versionchanged:: 2018.3.0 Added support for the ``pkgin`` provider. Verify that the system is completely up to date. name The name has no functional value and is only used as a tracking reference refresh refresh the package database before checking for new upgrades pkgs list of packages to upgrade :param str cache_valid_time: This parameter sets the value in seconds after which cache marked as invalid, and cache update is necessary. This overwrite ``refresh`` parameter default behavior. In this case cache_valid_time is set, refresh will not take place for amount in seconds since last ``apt-get update`` executed on the system. .. 
note:: This parameter available only on Debian based distributions, and have no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 kwargs Any keyword arguments to pass through to ``pkg.upgrade``. .. versionadded:: 2015.5.0 ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'Failed to update'} if 'pkg.list_upgrades' not in __salt__: ret['comment'] = 'State pkg.uptodate is not available' return ret # emerge --update doesn't appear to support repo notation if 'fromrepo' in kwargs and __grains__['os_family'] == 'Gentoo': ret['comment'] = '\'fromrepo\' argument not supported on this platform' return ret if isinstance(refresh, bool): pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) try: packages = __salt__['pkg.list_upgrades'](refresh=refresh, **kwargs) expected = {pkgname: {'new': pkgver, 'old': __salt__['pkg.version'](pkgname, **kwargs)} for pkgname, pkgver in six.iteritems(packages)} if isinstance(pkgs, list): packages = [pkg for pkg in packages if pkg in pkgs] expected = {pkgname: pkgver for pkgname, pkgver in six.iteritems(expected) if pkgname in pkgs} except Exception as exc: ret['comment'] = six.text_type(exc) return ret else: ret['comment'] = 'refresh must be either True or False' return ret if not packages: ret['comment'] = 'System is already up-to-date' ret['result'] = True return ret elif __opts__['test']: ret['comment'] = 'System update will be performed' ret['changes'] = expected ret['result'] = None return ret try: ret['changes'] = __salt__['pkg.upgrade'](refresh=refresh, pkgs=pkgs, **kwargs) except CommandExecutionError as exc: if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while updating ' 'packages: {0}'.format(exc)) return ret # If a package list was provided, ensure those packages were updated missing = [] if isinstance(pkgs, list): missing = [pkg for pkg in six.iterkeys(expected) if pkg not in ret['changes']] if missing: ret['comment'] = 'The following package(s) failed to update: {0}'.format(', '.join(missing)) ret['result'] = False else: ret['comment'] = 'Upgrade ran successfully' ret['result'] = True return ret def group_installed(name, skip=None, include=None, **kwargs): ''' .. versionadded:: 2015.8.0 .. versionchanged:: 2016.11.0 Added support in :mod:`pacman <salt.modules.pacman>` Ensure that an entire package group is installed. This state is currently only supported for the :mod:`yum <salt.modules.yumpkg>` and :mod:`pacman <salt.modules.pacman>` package managers. skip Packages that would normally be installed by the package group ("default" packages), which should not be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - skip: - piranha include Packages which are included in a group, which would not normally be installed by a ``yum groupinstall`` ("optional" packages). Note that this will not enforce group membership; if you include packages which are not members of the specified groups, they will still be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - include: - haproxy .. versionchanged:: 2016.3.0 This option can no longer be passed as a comma-separated list, it must now be passed as a list (as shown in the above example). .. note:: Because this is essentially a wrapper around :py:func:`pkg.install <salt.modules.yumpkg.install>`, any argument which can be passed to pkg.install may also be included here, and it will be passed on to the call to :py:func:`pkg.install <salt.modules.yumpkg.install>`. 
''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} if 'pkg.group_diff' not in __salt__: ret['comment'] = 'pkg.group_install not available for this platform' return ret if skip is None: skip = [] else: if not isinstance(skip, list): ret['comment'] = 'skip must be formatted as a list' return ret for idx, item in enumerate(skip): if not isinstance(item, six.string_types): skip[idx] = six.text_type(item) if include is None: include = [] else: if not isinstance(include, list): ret['comment'] = 'include must be formatted as a list' return ret for idx, item in enumerate(include): if not isinstance(item, six.string_types): include[idx] = six.text_type(item) try: diff = __salt__['pkg.group_diff'](name) except CommandExecutionError as err: ret['comment'] = ('An error was encountered while installing/updating ' 'group \'{0}\': {1}.'.format(name, err)) return ret mandatory = diff['mandatory']['installed'] + \ diff['mandatory']['not installed'] invalid_skip = [x for x in mandatory if x in skip] if invalid_skip: ret['comment'] = ( 'The following mandatory packages cannot be skipped: {0}' .format(', '.join(invalid_skip)) ) return ret targets = diff['mandatory']['not installed'] targets.extend([x for x in diff['default']['not installed'] if x not in skip]) targets.extend(include) if not targets: ret['result'] = True ret['comment'] = 'Group \'{0}\' is already installed'.format(name) return ret partially_installed = diff['mandatory']['installed'] \ or diff['default']['installed'] \ or diff['optional']['installed'] if __opts__['test']: ret['result'] = None if partially_installed: ret['comment'] = ( 'Group \'{0}\' is partially installed and will be updated' .format(name) ) else: ret['comment'] = 'Group \'{0}\' will be installed'.format(name) return ret try: ret['changes'] = __salt__['pkg.install'](pkgs=targets, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while ' 'installing/updating group \'{0}\': {1}' .format(name, exc)) return ret failed = [x for x in targets if x not in __salt__['pkg.list_pkgs'](**kwargs)] if failed: ret['comment'] = ( 'Failed to install the following packages: {0}' .format(', '.join(failed)) ) return ret ret['result'] = True ret['comment'] = 'Group \'{0}\' was {1}'.format( name, 'updated' if partially_installed else 'installed' ) return ret def mod_init(low): ''' Set a flag to tell the install functions to refresh the package database. This ensures that the package database is refreshed only once during a state run significantly improving the speed of package management during a state run. It sets a flag for a number of reasons, primarily due to timeline logic. When originally setting up the mod_init for pkg a number of corner cases arose with different package managers and how they refresh package data. It also runs the "ex_mod_init" from the package manager module that is currently loaded. The "ex_mod_init" is expected to work as a normal "mod_init" function. .. 
seealso:: :py:func:`salt.modules.ebuild.ex_mod_init` ''' ret = True if 'pkg.ex_mod_init' in __salt__: ret = __salt__['pkg.ex_mod_init'](low) if low['fun'] == 'installed' or low['fun'] == 'latest': salt.utils.pkg.write_rtag(__opts__) return ret return False def mod_aggregate(low, chunks, running): ''' The mod_aggregate function which looks up all packages in the available low chunks and merges them into a single pkgs ref in the present low data ''' pkgs = [] pkg_type = None agg_enabled = [ 'installed', 'latest', 'removed', 'purged', ] if low.get('fun') not in agg_enabled: return low for chunk in chunks: tag = __utils__['state.gen_tag'](chunk) if tag in running: # Already ran the pkg state, skip aggregation continue if chunk.get('state') == 'pkg': if '__agg__' in chunk: continue # Check for the same function if chunk.get('fun') != low.get('fun'): continue # Check for the same repo if chunk.get('fromrepo') != low.get('fromrepo'): continue # Check first if 'sources' was passed so we don't aggregate pkgs # and sources together. if 'sources' in chunk: if pkg_type is None: pkg_type = 'sources' if pkg_type == 'sources': pkgs.extend(chunk['sources']) chunk['__agg__'] = True else: if pkg_type is None: pkg_type = 'pkgs' if pkg_type == 'pkgs': # Pull out the pkg names! if 'pkgs' in chunk: pkgs.extend(chunk['pkgs']) chunk['__agg__'] = True elif 'name' in chunk: version = chunk.pop('version', None) if version is not None: pkgs.append({chunk['name']: version}) else: pkgs.append(chunk['name']) chunk['__agg__'] = True if pkg_type is not None and pkgs: if pkg_type in low: low[pkg_type].extend(pkgs) else: low[pkg_type] = pkgs return low def mod_watch(name, **kwargs): ''' Install/reinstall a package based on a watch requisite .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered. 
''' sfun = kwargs.pop('sfun', None) mapfun = {'purged': purged, 'latest': latest, 'removed': removed, 'installed': installed} if sfun in mapfun: return mapfun[sfun](name, **kwargs) return {'name': name, 'changes': {}, 'comment': 'pkg.{0} does not work with the watch requisite'.format(sfun), 'result': False}
saltstack/salt
salt/states/pkg.py
installed
python
def installed( name, version=None, refresh=None, fromrepo=None, skip_verify=False, skip_suggestions=False, pkgs=None, sources=None, allow_updates=False, pkg_verify=False, normalize=True, ignore_epoch=False, reinstall=False, update_holds=False, bypass_file=None, bypass_file_contains=None, **kwargs): ''' Ensure that the package is installed, and that it is the correct version (if specified). :param str name: The name of the package to be installed. This parameter is ignored if either "pkgs" or "sources" is used. Additionally, please note that this option can only be used to install packages from a software repository. To install a package file manually, use the "sources" option detailed below. :param str version: Install a specific version of a package. This option is ignored if "sources" is used. Currently, this option is supported for the following pkg providers: :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`pkgin <salt.modules.pkgin>`, :mod:`win_pkg <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`. The version number includes the release designation where applicable, to allow Salt to target a specific release of a given version. When in doubt, using the ``pkg.latest_version`` function for an uninstalled package will tell you the version available. .. code-block:: bash # salt myminion pkg.latest_version vim-enhanced myminion: 2:7.4.160-1.el7 .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the ``pkg.latest_version`` output above) must have the epoch included when specifying the version number. For example: .. 
code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. Also, while this function is not yet implemented for all pkg frontends, :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` will show all versions available in the various repositories for a given package, irrespective of whether or not it is installed. .. code-block:: bash # salt myminion pkg.list_repo_pkgs bash myminion: ---------- bash: - 4.2.46-21.el7_3 - 4.2.46-20.el7_2 This function was first added for :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` in 2014.1.0, and was expanded to :py:func:`Debian/Ubuntu <salt.modules.aptpkg.list_repo_pkgs>` and :py:func:`Arch Linux <salt.modules.pacman.list_repo_pkgs>`-based distros in the 2017.7.0 release. The version strings returned by either of these functions can be used as version specifiers in pkg states. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.installed: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 If the version given is the string ``latest``, the latest available package version will be installed à la ``pkg.latest``. **WILDCARD VERSIONS** As of the 2017.7.0 release, this state now supports wildcards in package versions for SUSE SLES/Leap/Tumbleweed, Debian/Ubuntu, RHEL/CentOS, Arch Linux, and their derivatives. Using wildcards can be useful for packages where the release name is built into the version in some way, such as for RHEL/CentOS which typically has version numbers like ``1.2.34-5.el7``. An example of the usage for this would be: .. 
code-block:: yaml mypkg: pkg.installed: - version: '1.2.34*' Keep in mind that using wildcard versions will result in a slower state run since Salt must gather the available versions of the specified packages and figure out which of them match the specified wildcard expression. :param bool refresh: This parameter controls whether or not the package repo database is updated prior to installing the requested package(s). If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before installing. If ``False``, the package database will *not* be refreshed before installing. If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - cache_valid_time: 300 - allow_updates: True - hold: False In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param str fromrepo: Specify a repository from which to install .. note:: Distros which use APT (Debian, Ubuntu, etc.) do not have a concept of repositories, in the same way as YUM-based distros do. 
When a source is added, it is assigned to a given release. Consider the following source configuration: .. code-block:: text deb http://ppa.launchpad.net/saltstack/salt/ubuntu precise main The packages provided by this source would be made available via the ``precise`` release, therefore ``fromrepo`` would need to be set to ``precise`` for Salt to install the package from this source. Having multiple sources in the same release may result in the default install candidate being newer than what is desired. If this is the case, the desired version must be specified using the ``version`` parameter. If the ``pkgs`` parameter is being used to install multiple packages in the same state, then instead of using ``version``, use the method of version specification described in the **Multiple Package Installation Options** section below. Running the shell command ``apt-cache policy pkgname`` on a minion can help elucidate the APT configuration and aid in properly configuring states: .. code-block:: bash root@saltmaster:~# salt ubuntu01 cmd.run 'apt-cache policy ffmpeg' ubuntu01: ffmpeg: Installed: (none) Candidate: 7:0.10.11-1~precise1 Version table: 7:0.10.11-1~precise1 0 500 http://ppa.launchpad.net/jon-severinsson/ffmpeg/ubuntu/ precise/main amd64 Packages 4:0.8.10-0ubuntu0.12.04.1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise-updates/main amd64 Packages 500 http://security.ubuntu.com/ubuntu/ precise-security/main amd64 Packages 4:0.8.1-0ubuntu1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise/main amd64 Packages The release is located directly after the source's URL. The actual release name is the part before the slash, so to install version **4:0.8.10-0ubuntu0.12.04.1** either ``precise-updates`` or ``precise-security`` could be used for the ``fromrepo`` value. :param bool skip_verify: Skip the GPG verification check for the package to be installed :param bool skip_suggestions: Force strict package naming. Disables lookup of package alternatives. .. 
versionadded:: 2014.1.1 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 :param bool allow_updates: Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - allow_updates: True - hold: False :param bool pkg_verify: .. versionadded:: 2014.7.0 For requested packages that are already installed and would not be targeted for upgrade or downgrade, use pkg.verify to determine if any of the files installed by the package have been altered. If files have been altered, the reinstall option of pkg.install is used to force a reinstall. Types to ignore can be passed to pkg.verify. Additionally, ``verify_options`` can be used to modify further the behavior of pkg.verify. See examples below. Currently, this option is supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>`. Examples: .. code-block:: yaml httpd: pkg.installed: - version: 2.2.15-30.el6.centos - pkg_verify: True .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc - verify_options: - nodeps - nofiledigest :param list ignore_types: List of types to ignore when verifying the package .. versionadded:: 2014.7.0 :param list verify_options: List of additional options to pass when verifying the package. 
These options will be added to the ``rpm -V`` command, prepended with ``--`` (for example, when ``nodeps`` is passed in this option, ``rpm -V`` will be run with ``--nodeps``). .. versionadded:: 2016.11.0 :param bool normalize: Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml gpfs.gplbin-2.6.32-279.31.1.el6.x86_64: pkg.installed: - normalize: False :param bool ignore_epoch: When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.installed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the package would be installed, but the state would report as failed because the actual installed version would be ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 | **MULTIPLE PACKAGE INSTALLATION OPTIONS: (not supported in pkgng)** :param list pkgs: A list of packages to install from a software repository. All packages listed under ``pkgs`` will be installed via a single command. .. 
code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar - baz - hold: True ``NOTE:`` For :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`winrepo <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`, version numbers can be specified in the ``pkgs`` argument. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz Additionally, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`zypper <salt.modules.zypper>`, :mod:`yum/dnf <salt.modules.yumpkg>`, and :mod:`apt <salt.modules.aptpkg>` support the ``<``, ``<=``, ``>=``, and ``>`` operators for more control over what versions will be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: '>=1.2.3-4' - baz ``NOTE:`` When using comparison operators, the expression must be enclosed in quotes to avoid a YAML render error. With :mod:`ebuild <salt.modules.ebuild>` is also possible to specify a use flag list and/or if the given packages should be in package.accept_keywords file and/or the overlay from which you want the package to be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo: '~' - bar: '~>=1.2:slot::overlay[use,-otheruse]' - baz :param list sources: A list of packages to install, along with the source URI or local path from which to install each package. In the example below, ``foo``, ``bar``, ``baz``, etc. refer to the name of the package, as it would appear in the output of the ``pkg.version`` or ``pkg.list_pkgs`` salt CLI commands. .. code-block:: yaml mypkgs: pkg.installed: - sources: - foo: salt://rpms/foo.rpm - bar: http://somesite.org/bar.rpm - baz: ftp://someothersite.org/baz.rpm - qux: /minion/path/to/qux.rpm **PLATFORM-SPECIFIC ARGUMENTS** These are specific to each OS. If it does not apply to the execution module for your OS, it is ignored. 
:param bool hold: Force the package to be held at the current installed version. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2014.7.0 :param bool update_holds: If ``True``, and this function would update the package version, any packages which are being held will be temporarily unheld so that they can be updated. Otherwise, if this function attempts to update a held package, the held package(s) will be skipped and the state will fail. By default, this parameter is set to ``False``. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2016.11.0 :param list names: A list of packages to install from a software repository. Each package will be installed individually by the package manager. .. warning:: Unlike ``pkgs``, the ``names`` parameter cannot specify a version. In addition, it makes a separate call to the package management frontend to install each package, whereas ``pkgs`` makes just a single call. It is therefore recommended to use ``pkgs`` instead of ``names`` to install multiple packages, both for the additional features and the performance improvement that it brings. :param bool install_recommends: Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - install_recommends: False :param bool only_upgrade: Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. 
:param bool report_reboot_exit_codes: If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. code-block:: yaml ms vcpp installed: pkg.installed: - name: ms-vcpp - version: 10.0.40219 - report_reboot_exit_codes: False :param str bypass_file: If you wish to bypass the full package validation process, you can specify a file related to the installed pacakge as a way to validate the pacakge has already been installed. A good example would be a config file that is deployed with the package. Another bypass_file could be ``/run/salt-minon.pid``. .. code-block:: yaml install_ntp: pkg.installed: - name: ntp - bypass_file: /etc/ntp.conf The use case for this feature is when running salt at significant scale. Each state that has a requisite for a ``pkg.installed`` will have salt querying the package manager of the system. Compared to simple diff checks, querying the pacakge manager is a lengthy process. This feature is an attempt to reduce the run time of states. If only a config change is being made but you wish to keep all of the self resolving requisites this bypasses the lenghty cost of the package manager. The assumption is that if this file is present, the package should already be installed. :param str bypass_file_contains: This option can only be used in conjunction with the ``bypass_file`` option. It is to provide a second layer of validation before bypassing the ``pkg.installed`` process. .. 
code-block:: yaml install_ntp: pkg.installed: - name: ntp - bypass_file: /etc/ntp.conf - bypass_file_contains: version-20181218 The will have salt check to see if the file contains the specified string. If the value is found, the ``pkg.installed`` process will be bypassed under the assumption that two pieces of validation have passed and the package is already installed. .. warning:: Do not try and use ``{{ salt['pkg.version']('ntp') }}`` in a jinja template as part of your bypass_file_contains match. This will trigger a ``pkg.version`` lookup with the pacakge manager and negate any time saved by trying to use the bypass feature. :return: A dictionary containing the state of the software installation :rtype dict: .. note:: The ``pkg.installed`` state supports the usage of ``reload_modules``. This functionality allows you to force Salt to reload all modules. In many cases, Salt is clever enough to transparently reload the modules. For example, if you install a package, Salt reloads modules because some other module or state might require the package which was installed. However, there are some edge cases where this may not be the case, which is what ``reload_modules`` is meant to resolve. You should only use ``reload_modules`` if your ``pkg.installed`` does some sort of installation where if you do not reload the modules future items in your state which rely on the software being installed will fail. Please see the :ref:`Reloading Modules <reloading-modules>` documentation for more information. ''' if not pkgs and isinstance(pkgs, list): return {'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided'} # If just a name (and optionally a version) is passed, just pack them into # the pkgs argument. 
if name and not any((pkgs, sources)): if version: pkgs = [{name: version}] version = None else: pkgs = [name] kwargs['saltenv'] = __env__ refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if bypass_file is not None and bypass_file_contains is not None: if os.path.isfile(bypass_file): with salt.utils.fopen(bypass_file) as bypass_file_open: if bypass_file_contains in bypass_file_open.read(): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as {} was present in {}'.format(bypass_file_contains, bypass_file)} if bypass_file is not None and bypass_file_contains is None: if os.path.isfile(bypass_file): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as bypass_file {} was present'.format(bypass_file)} # check if capabilities should be checked and modify the requested packages # accordingly. if pkgs: pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) if not isinstance(pkg_verify, list): pkg_verify = pkg_verify is True if (pkg_verify or isinstance(pkg_verify, list)) \ and 'pkg.verify' not in __salt__: return {'name': name, 'changes': {}, 'result': False, 'comment': 'pkg.verify not implemented'} if not isinstance(version, six.string_types) and version is not None: version = six.text_type(version) kwargs['allow_updates'] = allow_updates result = _find_install_targets(name, version, pkgs, sources, fromrepo=fromrepo, skip_suggestions=skip_suggestions, pkg_verify=pkg_verify, normalize=normalize, ignore_epoch=ignore_epoch, reinstall=reinstall, refresh=refresh, **kwargs) try: (desired, targets, to_unpurge, to_reinstall, altered_files, warnings, was_refreshed) = result if was_refreshed: refresh = False except ValueError: # _find_install_targets() found no targets or encountered an error # check that the hold function is available if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( 
name=name, pkgs=pkgs, sources=sources ) except (CommandExecutionError, SaltInvocationError) as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': six.text_type(exc)} if 'result' in hold_ret and not hold_ret['result']: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] for i in modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] result['changes'][i['name']] = i['changes'] for i in not_modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] for i in failed_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] return result if to_unpurge and 'lowpkg.unpurge' not in __salt__: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'lowpkg.unpurge not implemented'} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret # Remove any targets not returned by _find_install_targets if pkgs: pkgs = [dict([(x, y)]) for x, y in six.iteritems(targets)] pkgs.extend([dict([(x, y)]) for x, y in six.iteritems(to_reinstall)]) elif sources: oldsources = sources sources = [x for x in oldsources if next(iter(list(x.keys()))) in targets] sources.extend([x for x in oldsources if next(iter(list(x.keys()))) in to_reinstall]) comment = [] if __opts__['test']: if targets: if sources: summary = ', '.join(targets) else: summary = ', '.join([_get_desired_pkg(x, targets) for x in targets]) comment.append('The following packages would be ' 'installed/updated: {0}'.format(summary)) if to_unpurge: comment.append( 'The following packages would have their selection 
status ' 'changed from \'purge\' to \'install\': {0}' .format(', '.join(to_unpurge)) ) if to_reinstall: # Add a comment for each package in to_reinstall with its # pkg.verify output if reinstall: reinstall_targets = [] for reinstall_pkg in to_reinstall: if sources: reinstall_targets.append(reinstall_pkg) else: reinstall_targets.append( _get_desired_pkg(reinstall_pkg, to_reinstall) ) msg = 'The following packages would be reinstalled: ' msg += ', '.join(reinstall_targets) comment.append(msg) else: for reinstall_pkg in to_reinstall: if sources: pkgstr = reinstall_pkg else: pkgstr = _get_desired_pkg(reinstall_pkg, to_reinstall) comment.append( 'Package \'{0}\' would be reinstalled because the ' 'following files have been altered:'.format(pkgstr) ) comment.append( _nested_output(altered_files[reinstall_pkg]) ) ret = {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret changes = {'installed': {}} modified_hold = None not_modified_hold = None failed_hold = None if targets or to_reinstall: try: pkg_ret = __salt__['pkg.install'](name=None, refresh=refresh, version=version, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=pkgs, sources=sources, reinstall=bool(to_reinstall), normalize=normalize, update_holds=update_holds, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while installing ' 'package(s): {0}'.format(exc)) if warnings: ret.setdefault('warnings', []).extend(warnings) return ret if refresh: refresh = False if isinstance(pkg_ret, dict): changes['installed'].update(pkg_ret) elif isinstance(pkg_ret, six.string_types): comment.append(pkg_ret) # Code below will be looking for a dictionary. If this is a string # it means that there was an exception raised and that no packages # changed, so now that we have added this error to the comments we # set this to an empty dictionary so that the code below which # checks reinstall targets works. pkg_ret = {} if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( name=name, pkgs=desired ) except (CommandExecutionError, SaltInvocationError) as exc: comment.append(six.text_type(exc)) ret = {'name': name, 'changes': changes, 'result': False, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: if 'result' in hold_ret and not hold_ret['result']: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] if to_unpurge: changes['purge_desired'] = __salt__['lowpkg.unpurge'](*to_unpurge) # Analyze pkg.install results for packages in targets if sources: modified = [x for x in changes['installed'] if x in targets] not_modified = [x for x in desired if x not in targets 
and x not in to_reinstall] failed = [x for x in targets if x not in modified] else: if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True new_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__: new_caps = __salt__['pkg.list_provides'](**kwargs) else: new_caps = {} ok, failed = _verify_install(desired, new_pkgs, ignore_epoch=ignore_epoch, new_caps=new_caps) modified = [x for x in ok if x in targets] not_modified = [x for x in ok if x not in targets and x not in to_reinstall] failed = [x for x in failed if x in targets] # If there was nothing unpurged, just set the changes dict to the contents # of changes['installed']. if not changes.get('purge_desired'): changes = changes['installed'] if modified: if sources: summary = ', '.join(modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in modified]) if len(summary) < 20: comment.append('The following packages were installed/updated: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} installed/updated.'.format( len(modified), 's' if len(modified) > 1 else '', 'were' if len(modified) > 1 else 'was' ) ) if modified_hold: for i in modified_hold: change_name = i['name'] if change_name in changes: comment.append(i['comment']) if changes[change_name]['new']: changes[change_name]['new'] += '\n' changes[change_name]['new'] += '{0}'.format(i['changes']['new']) if changes[change_name]['old']: changes[change_name]['old'] += '\n' changes[change_name]['old'] += '{0}'.format(i['changes']['old']) else: comment.append(i['comment']) changes[change_name] = {} changes[change_name]['new'] = '{0}'.format(i['changes']['new']) # Any requested packages that were not targeted for install or reinstall if not_modified: if sources: summary = ', '.join(not_modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in not_modified]) if len(not_modified) <= 20: comment.append('The following 
packages were already installed: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} already installed'.format( len(not_modified), 's' if len(not_modified) > 1 else '', 'were' if len(not_modified) > 1 else 'was' ) ) if not_modified_hold: for i in not_modified_hold: comment.append(i['comment']) result = True if failed: if sources: summary = ', '.join(failed) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in failed]) comment.insert(0, 'The following packages failed to ' 'install/update: {0}'.format(summary)) result = False if failed_hold: for i in failed_hold: comment.append(i['comment']) result = False # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] # Rerun pkg.verify for packages in to_reinstall to determine failed modified = [] failed = [] for reinstall_pkg in to_reinstall: if reinstall: if reinstall_pkg in pkg_ret: modified.append(reinstall_pkg) else: failed.append(reinstall_pkg) elif pkg_verify: # No need to wrap this in a try/except because we would already # have caught invalid arguments earlier. 
verify_result = __salt__['pkg.verify'](reinstall_pkg, ignore_types=ignore_types, verify_options=verify_options, **kwargs) if verify_result: failed.append(reinstall_pkg) altered_files[reinstall_pkg] = verify_result else: modified.append(reinstall_pkg) if modified: # Add a comment for each package in modified with its pkg.verify output for modified_pkg in modified: if sources: pkgstr = modified_pkg else: pkgstr = _get_desired_pkg(modified_pkg, desired) msg = 'Package {0} was reinstalled.'.format(pkgstr) if modified_pkg in altered_files: msg += ' The following files were remediated:' comment.append(msg) comment.append(_nested_output(altered_files[modified_pkg])) else: comment.append(msg) if failed: # Add a comment for each package in failed with its pkg.verify output for failed_pkg in failed: if sources: pkgstr = failed_pkg else: pkgstr = _get_desired_pkg(failed_pkg, desired) msg = ('Reinstall was not successful for package {0}.' .format(pkgstr)) if failed_pkg in altered_files: msg += ' The following files could not be remediated:' comment.append(msg) comment.append(_nested_output(altered_files[failed_pkg])) else: comment.append(msg) result = False ret = {'name': name, 'changes': changes, 'result': result, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret
Ensure that the package is installed, and that it is the correct version (if specified). :param str name: The name of the package to be installed. This parameter is ignored if either "pkgs" or "sources" is used. Additionally, please note that this option can only be used to install packages from a software repository. To install a package file manually, use the "sources" option detailed below. :param str version: Install a specific version of a package. This option is ignored if "sources" is used. Currently, this option is supported for the following pkg providers: :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`pkgin <salt.modules.pkgin>`, :mod:`win_pkg <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`. The version number includes the release designation where applicable, to allow Salt to target a specific release of a given version. When in doubt, using the ``pkg.latest_version`` function for an uninstalled package will tell you the version available. .. code-block:: bash # salt myminion pkg.latest_version vim-enhanced myminion: 2:7.4.160-1.el7 .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the ``pkg.latest_version`` output above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. 
Also, while this function is not yet implemented for all pkg frontends, :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` will show all versions available in the various repositories for a given package, irrespective of whether or not it is installed. .. code-block:: bash # salt myminion pkg.list_repo_pkgs bash myminion: ---------- bash: - 4.2.46-21.el7_3 - 4.2.46-20.el7_2 This function was first added for :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` in 2014.1.0, and was expanded to :py:func:`Debian/Ubuntu <salt.modules.aptpkg.list_repo_pkgs>` and :py:func:`Arch Linux <salt.modules.pacman.list_repo_pkgs>`-based distros in the 2017.7.0 release. The version strings returned by either of these functions can be used as version specifiers in pkg states. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.installed: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 If the version given is the string ``latest``, the latest available package version will be installed à la ``pkg.latest``. **WILDCARD VERSIONS** As of the 2017.7.0 release, this state now supports wildcards in package versions for SUSE SLES/Leap/Tumbleweed, Debian/Ubuntu, RHEL/CentOS, Arch Linux, and their derivatives. Using wildcards can be useful for packages where the release name is built into the version in some way, such as for RHEL/CentOS which typically has version numbers like ``1.2.34-5.el7``. An example of the usage for this would be: .. code-block:: yaml mypkg: pkg.installed: - version: '1.2.34*' Keep in mind that using wildcard versions will result in a slower state run since Salt must gather the available versions of the specified packages and figure out which of them match the specified wildcard expression. :param bool refresh: This parameter controls whether or not the package repo database is updated prior to installing the requested package(s). 
If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before installing. If ``False``, the package database will *not* be refreshed before installing. If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - cache_valid_time: 300 - allow_updates: True - hold: False In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param str fromrepo: Specify a repository from which to install .. note:: Distros which use APT (Debian, Ubuntu, etc.) do not have a concept of repositories, in the same way as YUM-based distros do. When a source is added, it is assigned to a given release. Consider the following source configuration: .. code-block:: text deb http://ppa.launchpad.net/saltstack/salt/ubuntu precise main The packages provided by this source would be made available via the ``precise`` release, therefore ``fromrepo`` would need to be set to ``precise`` for Salt to install the package from this source. 
Having multiple sources in the same release may result in the default install candidate being newer than what is desired. If this is the case, the desired version must be specified using the ``version`` parameter. If the ``pkgs`` parameter is being used to install multiple packages in the same state, then instead of using ``version``, use the method of version specification described in the **Multiple Package Installation Options** section below. Running the shell command ``apt-cache policy pkgname`` on a minion can help elucidate the APT configuration and aid in properly configuring states: .. code-block:: bash root@saltmaster:~# salt ubuntu01 cmd.run 'apt-cache policy ffmpeg' ubuntu01: ffmpeg: Installed: (none) Candidate: 7:0.10.11-1~precise1 Version table: 7:0.10.11-1~precise1 0 500 http://ppa.launchpad.net/jon-severinsson/ffmpeg/ubuntu/ precise/main amd64 Packages 4:0.8.10-0ubuntu0.12.04.1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise-updates/main amd64 Packages 500 http://security.ubuntu.com/ubuntu/ precise-security/main amd64 Packages 4:0.8.1-0ubuntu1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise/main amd64 Packages The release is located directly after the source's URL. The actual release name is the part before the slash, so to install version **4:0.8.10-0ubuntu0.12.04.1** either ``precise-updates`` or ``precise-security`` could be used for the ``fromrepo`` value. :param bool skip_verify: Skip the GPG verification check for the package to be installed :param bool skip_suggestions: Force strict package naming. Disables lookup of package alternatives. .. versionadded:: 2014.1.1 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 :param bool allow_updates: Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). 
This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - allow_updates: True - hold: False :param bool pkg_verify: .. versionadded:: 2014.7.0 For requested packages that are already installed and would not be targeted for upgrade or downgrade, use pkg.verify to determine if any of the files installed by the package have been altered. If files have been altered, the reinstall option of pkg.install is used to force a reinstall. Types to ignore can be passed to pkg.verify. Additionally, ``verify_options`` can be used to modify further the behavior of pkg.verify. See examples below. Currently, this option is supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>`. Examples: .. code-block:: yaml httpd: pkg.installed: - version: 2.2.15-30.el6.centos - pkg_verify: True .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc - verify_options: - nodeps - nofiledigest :param list ignore_types: List of types to ignore when verifying the package .. versionadded:: 2014.7.0 :param list verify_options: List of additional options to pass when verifying the package. These options will be added to the ``rpm -V`` command, prepended with ``--`` (for example, when ``nodeps`` is passed in this option, ``rpm -V`` will be run with ``--nodeps``). .. versionadded:: 2016.11.0 :param bool normalize: Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml gpfs.gplbin-2.6.32-279.31.1.el6.x86_64: pkg.installed: - normalize: False :param bool ignore_epoch: When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.installed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the package would be installed, but the state would report as failed because the actual installed version would be ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 | **MULTIPLE PACKAGE INSTALLATION OPTIONS: (not supported in pkgng)** :param list pkgs: A list of packages to install from a software repository. All packages listed under ``pkgs`` will be installed via a single command. .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar - baz - hold: True ``NOTE:`` For :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`winrepo <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`, version numbers can be specified in the ``pkgs`` argument. For example: .. 
code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz Additionally, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`zypper <salt.modules.zypper>`, :mod:`yum/dnf <salt.modules.yumpkg>`, and :mod:`apt <salt.modules.aptpkg>` support the ``<``, ``<=``, ``>=``, and ``>`` operators for more control over what versions will be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: '>=1.2.3-4' - baz ``NOTE:`` When using comparison operators, the expression must be enclosed in quotes to avoid a YAML render error. With :mod:`ebuild <salt.modules.ebuild>` is also possible to specify a use flag list and/or if the given packages should be in package.accept_keywords file and/or the overlay from which you want the package to be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo: '~' - bar: '~>=1.2:slot::overlay[use,-otheruse]' - baz :param list sources: A list of packages to install, along with the source URI or local path from which to install each package. In the example below, ``foo``, ``bar``, ``baz``, etc. refer to the name of the package, as it would appear in the output of the ``pkg.version`` or ``pkg.list_pkgs`` salt CLI commands. .. code-block:: yaml mypkgs: pkg.installed: - sources: - foo: salt://rpms/foo.rpm - bar: http://somesite.org/bar.rpm - baz: ftp://someothersite.org/baz.rpm - qux: /minion/path/to/qux.rpm **PLATFORM-SPECIFIC ARGUMENTS** These are specific to each OS. If it does not apply to the execution module for your OS, it is ignored. :param bool hold: Force the package to be held at the current installed version. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2014.7.0 :param bool update_holds: If ``True``, and this function would update the package version, any packages which are being held will be temporarily unheld so that they can be updated. 
Otherwise, if this function attempts to update a held package, the held package(s) will be skipped and the state will fail. By default, this parameter is set to ``False``. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2016.11.0 :param list names: A list of packages to install from a software repository. Each package will be installed individually by the package manager. .. warning:: Unlike ``pkgs``, the ``names`` parameter cannot specify a version. In addition, it makes a separate call to the package management frontend to install each package, whereas ``pkgs`` makes just a single call. It is therefore recommended to use ``pkgs`` instead of ``names`` to install multiple packages, both for the additional features and the performance improvement that it brings. :param bool install_recommends: Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - install_recommends: False :param bool only_upgrade: Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. :param bool report_reboot_exit_codes: If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. 
code-block:: yaml ms vcpp installed: pkg.installed: - name: ms-vcpp - version: 10.0.40219 - report_reboot_exit_codes: False :param str bypass_file: If you wish to bypass the full package validation process, you can specify a file related to the installed pacakge as a way to validate the pacakge has already been installed. A good example would be a config file that is deployed with the package. Another bypass_file could be ``/run/salt-minon.pid``. .. code-block:: yaml install_ntp: pkg.installed: - name: ntp - bypass_file: /etc/ntp.conf The use case for this feature is when running salt at significant scale. Each state that has a requisite for a ``pkg.installed`` will have salt querying the package manager of the system. Compared to simple diff checks, querying the pacakge manager is a lengthy process. This feature is an attempt to reduce the run time of states. If only a config change is being made but you wish to keep all of the self resolving requisites this bypasses the lenghty cost of the package manager. The assumption is that if this file is present, the package should already be installed. :param str bypass_file_contains: This option can only be used in conjunction with the ``bypass_file`` option. It is to provide a second layer of validation before bypassing the ``pkg.installed`` process. .. code-block:: yaml install_ntp: pkg.installed: - name: ntp - bypass_file: /etc/ntp.conf - bypass_file_contains: version-20181218 The will have salt check to see if the file contains the specified string. If the value is found, the ``pkg.installed`` process will be bypassed under the assumption that two pieces of validation have passed and the package is already installed. .. warning:: Do not try and use ``{{ salt['pkg.version']('ntp') }}`` in a jinja template as part of your bypass_file_contains match. This will trigger a ``pkg.version`` lookup with the pacakge manager and negate any time saved by trying to use the bypass feature. 
:return: A dictionary containing the state of the software installation :rtype dict: .. note:: The ``pkg.installed`` state supports the usage of ``reload_modules``. This functionality allows you to force Salt to reload all modules. In many cases, Salt is clever enough to transparently reload the modules. For example, if you install a package, Salt reloads modules because some other module or state might require the package which was installed. However, there are some edge cases where this may not be the case, which is what ``reload_modules`` is meant to resolve. You should only use ``reload_modules`` if your ``pkg.installed`` does some sort of installation where if you do not reload the modules future items in your state which rely on the software being installed will fail. Please see the :ref:`Reloading Modules <reloading-modules>` documentation for more information.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pkg.py#L949-L2023
[ "def check_refresh(opts, refresh=None):\n '''\n Check whether or not a refresh is necessary\n\n Returns:\n\n - True if refresh evaluates as True\n - False if refresh is False\n - A boolean if refresh is not False and the rtag file exists\n '''\n return bool(\n salt.utils.data.is_true(refresh) or\n (os.path.isfile(rtag(opts)) and refresh is not False)\n )\n", "def _nested_output(obj):\n '''\n Serialize obj and format for output\n '''\n nested.__opts__ = __opts__\n ret = nested.output(obj).rstrip()\n return ret\n", "def _find_install_targets(name=None,\n version=None,\n pkgs=None,\n sources=None,\n skip_suggestions=False,\n pkg_verify=False,\n normalize=True,\n ignore_epoch=False,\n reinstall=False,\n refresh=False,\n **kwargs):\n '''\n Inspect the arguments to pkg.installed and discover what packages need to\n be installed. Return a dict of desired packages\n '''\n was_refreshed = False\n\n if all((pkgs, sources)):\n return {'name': name,\n 'changes': {},\n 'result': False,\n 'comment': 'Only one of \"pkgs\" and \"sources\" is permitted.'}\n\n # dict for packages that fail pkg.verify and their altered files\n altered_files = {}\n # Get the ignore_types list if any from the pkg_verify argument\n if isinstance(pkg_verify, list) \\\n and any(x.get('ignore_types') is not None\n for x in pkg_verify\n if isinstance(x, _OrderedDict)\n and 'ignore_types' in x):\n ignore_types = next(x.get('ignore_types')\n for x in pkg_verify\n if 'ignore_types' in x)\n else:\n ignore_types = []\n\n # Get the verify_options list if any from the pkg_verify argument\n if isinstance(pkg_verify, list) \\\n and any(x.get('verify_options') is not None\n for x in pkg_verify\n if isinstance(x, _OrderedDict)\n and 'verify_options' in x):\n verify_options = next(x.get('verify_options')\n for x in pkg_verify\n if 'verify_options' in x)\n else:\n verify_options = []\n\n if __grains__['os'] == 'FreeBSD':\n kwargs['with_origin'] = True\n\n if salt.utils.platform.is_windows():\n # Windows requires a 
refresh to establish a pkg db if refresh=True, so\n # add it to the kwargs.\n kwargs['refresh'] = refresh\n\n resolve_capabilities = kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__\n try:\n cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)\n cur_prov = resolve_capabilities and __salt__['pkg.list_provides'](**kwargs) or dict()\n except CommandExecutionError as exc:\n return {'name': name,\n 'changes': {},\n 'result': False,\n 'comment': exc.strerror}\n\n if salt.utils.platform.is_windows() and kwargs.pop('refresh', False):\n # We already refreshed when we called pkg.list_pkgs\n was_refreshed = True\n refresh = False\n\n if any((pkgs, sources)):\n if pkgs:\n desired = _repack_pkgs(pkgs, normalize=normalize)\n elif sources:\n desired = __salt__['pkg_resource.pack_sources'](\n sources,\n normalize=normalize,\n )\n\n if not desired:\n # Badly-formatted SLS\n return {'name': name,\n 'changes': {},\n 'result': False,\n 'comment': 'Invalidly formatted \\'{0}\\' parameter. 
See '\n 'minion log.'.format('pkgs' if pkgs\n else 'sources')}\n\n to_unpurge = _find_unpurge_targets(desired, **kwargs)\n else:\n if salt.utils.platform.is_windows():\n pkginfo = _get_package_info(name, saltenv=kwargs['saltenv'])\n if not pkginfo:\n return {'name': name,\n 'changes': {},\n 'result': False,\n 'comment': 'Package {0} not found in the '\n 'repository.'.format(name)}\n if version is None:\n version = _get_latest_pkg_version(pkginfo)\n\n if normalize:\n _normalize_name = \\\n __salt__.get('pkg.normalize_name', lambda pkgname: pkgname)\n desired = {_normalize_name(name): version}\n else:\n desired = {name: version}\n\n to_unpurge = _find_unpurge_targets(desired, **kwargs)\n\n # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names\n origin = bool(re.search('/', name))\n\n if __grains__['os'] == 'FreeBSD' and origin:\n cver = [k for k, v in six.iteritems(cur_pkgs)\n if v['origin'] == name]\n else:\n cver = cur_pkgs.get(name, [])\n\n if name not in to_unpurge:\n if version and version in cver \\\n and not reinstall \\\n and not pkg_verify:\n # The package is installed and is the correct version\n return {'name': name,\n 'changes': {},\n 'result': True,\n 'comment': 'Version {0} of package \\'{1}\\' is already '\n 'installed'.format(version, name)}\n\n # if cver is not an empty string, the package is already installed\n elif cver and version is None \\\n and not reinstall \\\n and not pkg_verify:\n # The package is installed\n return {'name': name,\n 'changes': {},\n 'result': True,\n 'comment': 'Package {0} is already '\n 'installed'.format(name)}\n\n version_spec = False\n if not sources:\n # Check for alternate package names if strict processing is not\n # enforced. Takes extra time. 
Disable for improved performance\n if not skip_suggestions:\n # Perform platform-specific pre-flight checks\n not_installed = dict([\n (name, version)\n for name, version in desired.items()\n if not (name in cur_pkgs and (version is None or _fulfills_version_string(cur_pkgs[name], version)))\n ])\n if not_installed:\n try:\n problems = _preflight_check(not_installed, **kwargs)\n except CommandExecutionError:\n pass\n else:\n comments = []\n if problems.get('no_suggest'):\n comments.append(\n 'The following package(s) were not found, and no '\n 'possible matches were found in the package db: '\n '{0}'.format(\n ', '.join(sorted(problems['no_suggest']))\n )\n )\n if problems.get('suggest'):\n for pkgname, suggestions in \\\n six.iteritems(problems['suggest']):\n comments.append(\n 'Package \\'{0}\\' not found (possible matches: '\n '{1})'.format(pkgname, ', '.join(suggestions))\n )\n if comments:\n if len(comments) > 1:\n comments.append('')\n return {'name': name,\n 'changes': {},\n 'result': False,\n 'comment': '. '.join(comments).rstrip()}\n\n # Resolve the latest package version for any packages with \"latest\" in the\n # package version\n wants_latest = [] \\\n if sources \\\n else [x for x, y in six.iteritems(desired) if y == 'latest']\n if wants_latest:\n resolved_latest = __salt__['pkg.latest_version'](*wants_latest,\n refresh=refresh,\n **kwargs)\n if len(wants_latest) == 1:\n resolved_latest = {wants_latest[0]: resolved_latest}\n if refresh:\n was_refreshed = True\n refresh = False\n\n # pkg.latest_version returns an empty string when the package is\n # up-to-date. So check the currently-installed packages. If found, the\n # resolved latest version will be the currently installed one from\n # cur_pkgs. 
If not found, then the package doesn't exist and the\n # resolved latest version will be None.\n for key in resolved_latest:\n if not resolved_latest[key]:\n if key in cur_pkgs:\n resolved_latest[key] = cur_pkgs[key][-1]\n else:\n resolved_latest[key] = None\n # Update the desired versions with the ones we resolved\n desired.update(resolved_latest)\n\n # Find out which packages will be targeted in the call to pkg.install\n targets = {}\n to_reinstall = {}\n problems = []\n warnings = []\n failed_verify = False\n for package_name, version_string in six.iteritems(desired):\n cver = cur_pkgs.get(package_name, [])\n if resolve_capabilities and not cver and package_name in cur_prov:\n cver = cur_pkgs.get(cur_prov.get(package_name)[0], [])\n\n # Package not yet installed, so add to targets\n if not cver:\n targets[package_name] = version_string\n continue\n if sources:\n if reinstall:\n to_reinstall[package_name] = version_string\n continue\n elif 'lowpkg.bin_pkg_info' not in __salt__:\n continue\n # Metadata parser is available, cache the file and derive the\n # package's name and version\n err = 'Unable to cache {0}: {1}'\n try:\n cached_path = __salt__['cp.cache_file'](version_string, saltenv=kwargs['saltenv'])\n except CommandExecutionError as exc:\n problems.append(err.format(version_string, exc))\n continue\n if not cached_path:\n problems.append(err.format(version_string, 'file not found'))\n continue\n elif not os.path.exists(cached_path):\n problems.append('{0} does not exist on minion'.format(version_string))\n continue\n source_info = __salt__['lowpkg.bin_pkg_info'](cached_path)\n if source_info is None:\n warnings.append('Failed to parse metadata for {0}'.format(version_string))\n continue\n else:\n verstr = source_info['version']\n else:\n verstr = version_string\n if reinstall:\n to_reinstall[package_name] = version_string\n continue\n if not __salt__['pkg_resource.check_extra_requirements'](package_name, version_string):\n targets[package_name] = 
version_string\n continue\n # No version specified and pkg is installed\n elif __salt__['pkg_resource.version_clean'](version_string) is None:\n if (not reinstall) and pkg_verify:\n try:\n verify_result = __salt__['pkg.verify'](\n package_name,\n ignore_types=ignore_types,\n verify_options=verify_options,\n **kwargs\n )\n except (CommandExecutionError, SaltInvocationError) as exc:\n failed_verify = exc.strerror\n continue\n if verify_result:\n to_reinstall[package_name] = version_string\n altered_files[package_name] = verify_result\n continue\n version_fulfilled = False\n allow_updates = bool(not sources and kwargs.get('allow_updates'))\n try:\n version_fulfilled = _fulfills_version_string(cver, verstr, ignore_epoch=ignore_epoch, allow_updates=allow_updates)\n except CommandExecutionError as exc:\n problems.append(exc.strerror)\n continue\n\n # Compare desired version against installed version.\n version_spec = True\n if not version_fulfilled:\n if reinstall:\n to_reinstall[package_name] = version_string\n else:\n version_conditions = _parse_version_string(version_string)\n if pkg_verify and any(oper == '==' for oper, version in version_conditions):\n try:\n verify_result = __salt__['pkg.verify'](\n package_name,\n ignore_types=ignore_types,\n verify_options=verify_options,\n **kwargs\n )\n except (CommandExecutionError, SaltInvocationError) as exc:\n failed_verify = exc.strerror\n continue\n if verify_result:\n to_reinstall[package_name] = version_string\n altered_files[package_name] = verify_result\n else:\n log.debug(\n 'Current version (%s) did not match desired version '\n 'specification (%s), adding to installation targets',\n cver, version_string\n )\n targets[package_name] = version_string\n\n if failed_verify:\n problems.append(failed_verify)\n\n if problems:\n return {'name': name,\n 'changes': {},\n 'result': False,\n 'comment': ' '.join(problems)}\n\n if not any((targets, to_unpurge, to_reinstall)):\n # All specified packages are installed\n msg = 'All 
specified packages are already installed{0}'\n msg = msg.format(\n ' and are at the desired version' if version_spec and not sources\n else ''\n )\n ret = {'name': name,\n 'changes': {},\n 'result': True,\n 'comment': msg}\n if warnings:\n ret.setdefault('warnings', []).extend(warnings)\n return ret\n\n return (desired, targets, to_unpurge, to_reinstall, altered_files,\n warnings, was_refreshed)\n", "def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None):\n '''\n Determine whether or not the installed packages match what was requested in\n the SLS file.\n '''\n ok = []\n failed = []\n if not new_caps:\n new_caps = dict()\n for pkgname, pkgver in desired.items():\n # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names.\n # Homebrew for Mac OSX does something similar with tap names\n # prefixing package names, separated with a slash.\n has_origin = '/' in pkgname\n\n if __grains__['os'] == 'FreeBSD' and has_origin:\n cver = [k for k, v in six.iteritems(new_pkgs) if v['origin'] == pkgname]\n elif __grains__['os'] == 'MacOS' and has_origin:\n cver = new_pkgs.get(pkgname, new_pkgs.get(pkgname.split('/')[-1]))\n elif __grains__['os'] == 'OpenBSD':\n cver = new_pkgs.get(pkgname.split('%')[0])\n elif __grains__['os_family'] == 'Debian':\n cver = new_pkgs.get(pkgname.split('=')[0])\n else:\n cver = new_pkgs.get(pkgname)\n if not cver and pkgname in new_caps:\n cver = new_pkgs.get(new_caps.get(pkgname)[0])\n\n if not cver:\n failed.append(pkgname)\n continue\n elif pkgver == 'latest':\n ok.append(pkgname)\n continue\n elif not __salt__['pkg_resource.version_clean'](pkgver):\n ok.append(pkgname)\n continue\n elif pkgver.endswith(\"*\") and cver[0].startswith(pkgver[:-1]):\n ok.append(pkgname)\n continue\n if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch):\n ok.append(pkgname)\n else:\n failed.append(pkgname)\n return ok, failed\n", "def _get_desired_pkg(name, desired):\n '''\n Helper function that retrieves and nicely formats 
the desired pkg (and\n version if specified) so that helpful information can be printed in the\n comment for the state.\n '''\n if not desired[name] or desired[name].startswith(('<', '>', '=')):\n oper = ''\n else:\n oper = '='\n return '{0}{1}{2}'.format(name, oper,\n '' if not desired[name] else desired[name])\n", "def _resolve_capabilities(pkgs, refresh=False, **kwargs):\n '''\n Resolve capabilities in ``pkgs`` and exchange them with real package\n names, when the result is distinct.\n This feature can be turned on while setting the paramter\n ``resolve_capabilities`` to True.\n\n Return the input dictionary with replaced capability names and as\n second return value a bool which say if a refresh need to be run.\n\n In case of ``resolve_capabilities`` is False (disabled) or not\n supported by the implementation the input is returned unchanged.\n '''\n if not pkgs or 'pkg.resolve_capabilities' not in __salt__:\n return pkgs, refresh\n\n ret = __salt__['pkg.resolve_capabilities'](pkgs, refresh=refresh, **kwargs)\n return ret, False\n" ]
# -*- coding: utf-8 -*- ''' Installation of packages using OS package managers such as yum or apt-get ========================================================================= .. note:: On minions running systemd>=205, as of version 2015.8.12, 2016.3.3, and 2016.11.0, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing the package manager commands spawned by Salt, when Salt updates itself (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.use_scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Salt can manage software packages via the pkg state module, packages can be set up to be installed, latest, removed and purged. Package management declarations are typically rather simple: .. code-block:: yaml vim: pkg.installed A more involved example involves pulling from a custom repository. .. code-block:: yaml base: pkgrepo.managed: - humanname: Logstash PPA - name: ppa:wolfnet/logstash - dist: precise - file: /etc/apt/sources.list.d/logstash.list - keyid: 28B04E4A - keyserver: keyserver.ubuntu.com logstash: pkg.installed: - fromrepo: ppa:wolfnet/logstash Multiple packages can also be installed with the use of the pkgs state module .. code-block:: yaml dotdeb.repo: pkgrepo.managed: - humanname: Dotdeb - name: deb http://packages.dotdeb.org wheezy-php55 all - dist: wheezy-php55 - file: /etc/apt/sources.list.d/dotbeb.list - keyid: 89DF5277 - keyserver: keys.gnupg.net - refresh_db: true php.packages: pkg.installed: - fromrepo: wheezy-php55 - pkgs: - php5-fpm - php5-cli - php5-curl .. 
warning:: Package names are currently case-sensitive. If the minion is using a package manager which is not case-sensitive (such as :mod:`pkgng <salt.modules.pkgng>`), then this state will fail if the proper case is not used. This will be addressed in a future release of Salt. ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import fnmatch import logging import os import re # Import Salt libs import salt.utils.pkg import salt.utils.platform import salt.utils.versions from salt.output import nested from salt.utils.functools import namespaced_function as _namespaced_function from salt.utils.odict import OrderedDict as _OrderedDict from salt.exceptions import ( CommandExecutionError, MinionError, SaltInvocationError ) from salt.modules.pkg_resource import _repack_pkgs # Import 3rd-party libs from salt.ext import six # pylint: disable=invalid-name _repack_pkgs = _namespaced_function(_repack_pkgs, globals()) if salt.utils.platform.is_windows(): # pylint: disable=import-error,no-name-in-module,unused-import from salt.ext.six.moves.urllib.parse import urlparse as _urlparse from salt.exceptions import SaltRenderError import collections import datetime import errno import time from functools import cmp_to_key # pylint: disable=import-error # pylint: enable=unused-import from salt.modules.win_pkg import _get_package_info from salt.modules.win_pkg import get_repo_data from salt.modules.win_pkg import _get_repo_details from salt.modules.win_pkg import _refresh_db_conditional from salt.modules.win_pkg import refresh_db from salt.modules.win_pkg import genrepo from salt.modules.win_pkg import _repo_process_pkg_sls from salt.modules.win_pkg import _get_latest_pkg_version from salt.modules.win_pkg import _reverse_cmp_pkg_versions _get_package_info = _namespaced_function(_get_package_info, globals()) get_repo_data = _namespaced_function(get_repo_data, globals()) _get_repo_details = \ _namespaced_function(_get_repo_details, globals()) 
_refresh_db_conditional = \ _namespaced_function(_refresh_db_conditional, globals()) refresh_db = _namespaced_function(refresh_db, globals()) genrepo = _namespaced_function(genrepo, globals()) _repo_process_pkg_sls = \ _namespaced_function(_repo_process_pkg_sls, globals()) _get_latest_pkg_version = \ _namespaced_function(_get_latest_pkg_version, globals()) _reverse_cmp_pkg_versions = \ _namespaced_function(_reverse_cmp_pkg_versions, globals()) # The following imports are used by the namespaced win_pkg funcs # and need to be included in their globals. # pylint: disable=import-error,unused-import from salt.utils.versions import LooseVersion # pylint: enable=import-error,unused-import # pylint: enable=invalid-name log = logging.getLogger(__name__) def __virtual__(): ''' Only make these states available if a pkg provider has been detected or assigned for this minion ''' return 'pkg.install' in __salt__ def _get_comparison_spec(pkgver): ''' Return a tuple containing the comparison operator and the version. If no comparison operator was passed, the comparison is assumed to be an "equals" comparison, and "==" will be the operator returned. ''' oper, verstr = salt.utils.pkg.split_comparison(pkgver.strip()) if oper in ('=', ''): oper = '==' return oper, verstr def _parse_version_string(version_conditions_string): ''' Returns a list of two-tuples containing (operator, version). ''' result = [] version_conditions_string = version_conditions_string.strip() if not version_conditions_string: return result for version_condition in version_conditions_string.split(','): operator_and_version = _get_comparison_spec(version_condition) result.append(operator_and_version) return result def _fulfills_version_string(installed_versions, version_conditions_string, ignore_epoch=False, allow_updates=False): ''' Returns True if any of the installed versions match the specified version conditions, otherwise returns False. 
    installed_versions
        The installed versions
    version_conditions_string
        The string containing all version conditions. E.G.
        1.2.3-4
        >=1.2.3-4
        >=1.2.3-4, <2.3.4-5
        >=1.2.3-4, <2.3.4-5, !=1.2.4-1
    ignore_epoch : False
        When a package version contains a non-zero epoch (e.g.
        ``1:3.14.159-2.el7``), and a specific version of a package is desired,
        set this option to ``True`` to ignore the epoch when comparing
        versions.
    allow_updates : False
        Allow the package to be updated outside Salt's control (e.g. auto
        updates on Windows). This means a package on the Minion can have a
        newer version than the latest available in the repository without
        enforcing a re-installation of the package.
        (Only applicable if only one strict version condition is specified
        E.G. version: 2.0.6~ubuntu3)
    '''
    version_conditions = _parse_version_string(version_conditions_string)
    for installed_version in installed_versions:
        # An installed version satisfies the spec only if it fulfills every
        # condition in the comma-separated list (logical AND of conditions).
        fullfills_all = True
        for operator, version_string in version_conditions:
            # With allow_updates and a single strict '==' condition, accept
            # any version at or above the pinned one, since the package may
            # legitimately have been updated outside of Salt's control.
            if allow_updates and len(version_conditions) == 1 and operator == '==':
                operator = '>='
            fullfills_all = fullfills_all and \
                _fulfills_version_spec([installed_version], operator,
                                       version_string,
                                       ignore_epoch=ignore_epoch)
        if fullfills_all:
            return True
    return False


def _fulfills_version_spec(versions, oper, desired_version,
                           ignore_epoch=False):
    '''
    Returns True if any of the installed versions match the specified version,
    otherwise returns False
    '''
    # pkg.version_cmp is optional; salt.utils.versions.compare falls back to
    # its own comparison logic when cmp_func is None.
    cmp_func = __salt__.get('pkg.version_cmp')
    # stripping "with_origin" dict wrapper
    if salt.utils.platform.is_freebsd():
        if isinstance(versions, dict) and 'version' in versions:
            versions = versions['version']
    for ver in versions:
        # For an equality comparison, allow shell-style globbing in the
        # desired version (e.g. '1.2*') via fnmatch; otherwise defer to the
        # version-comparison helper.
        if (oper == '==' and fnmatch.fnmatch(ver, desired_version)) \
                or salt.utils.versions.compare(ver1=ver,
                                               oper=oper,
                                               ver2=desired_version,
                                               cmp_func=cmp_func,
                                               ignore_epoch=ignore_epoch):
            return True
    return False


def _find_unpurge_targets(desired, **kwargs):
    '''
    Find packages which are marked to be purged but can't yet be removed
    because they are dependencies for
other installed packages. These are the packages which will need to be 'unpurged' because they are part of pkg.installed states. This really just applies to Debian-based Linuxes. ''' return [ x for x in desired if x in __salt__['pkg.list_pkgs'](purge_desired=True, **kwargs) ] def _find_download_targets(name=None, version=None, pkgs=None, normalize=True, skip_suggestions=False, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.downloaded and discover what packages need to be downloaded. Return a dict of packages to download. ''' cur_pkgs = __salt__['pkg.list_downloaded'](**kwargs) if pkgs: to_download = _repack_pkgs(pkgs, normalize=normalize) if not to_download: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. See ' 'minion log.'} else: if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_download = {_normalize_name(name): version} else: to_download = {name: version} cver = cur_pkgs.get(name, {}) if name in to_download: # Package already downloaded, no need to download again if cver and version in cver: return {'name': name, 'changes': {}, 'result': True, 'comment': 'Version {0} of package \'{1}\' is already ' 'downloaded'.format(version, name)} # if cver is not an empty string, the package is already downloaded elif cver and version is None: # The package is downloaded return {'name': name, 'changes': {}, 'result': True, 'comment': 'Package {0} is already ' 'downloaded'.format(name)} version_spec = False if not skip_suggestions: try: problems = _preflight_check(to_download, **kwargs) except CommandExecutionError: pass else: comments = [] if problems.get('no_suggest'): comments.append( 'The following package(s) were not found, and no ' 'possible matches were found in the package db: ' '{0}'.format( ', '.join(sorted(problems['no_suggest'])) ) ) if problems.get('suggest'): for pkgname, suggestions in \ six.iteritems(problems['suggest']): 
comments.append( 'Package \'{0}\' not found (possible matches: ' '{1})'.format(pkgname, ', '.join(suggestions)) ) if comments: if len(comments) > 1: comments.append('') return {'name': name, 'changes': {}, 'result': False, 'comment': '. '.join(comments).rstrip()} # Find out which packages will be targeted in the call to pkg.download # Check current downloaded versions against specified versions targets = {} problems = [] for pkgname, pkgver in six.iteritems(to_download): cver = cur_pkgs.get(pkgname, {}) # Package not yet downloaded, so add to targets if not cver: targets[pkgname] = pkgver continue # No version specified but package is already downloaded elif cver and not pkgver: continue version_spec = True try: if not _fulfills_version_string(cver.keys(), pkgver, ignore_epoch=ignore_epoch): targets[pkgname] = pkgver except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already downloaded msg = ( 'All specified packages{0} are already downloaded' .format(' (matching specified versions)' if version_spec else '') ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_advisory_targets(name=None, advisory_ids=None, **kwargs): ''' Inspect the arguments to pkg.patch_installed and discover what advisory patches need to be installed. Return a dict of advisory patches to install. 
''' cur_patches = __salt__['pkg.list_installed_patches'](**kwargs) if advisory_ids: to_download = advisory_ids else: to_download = [name] if cur_patches.get(name, {}): # Advisory patch already installed, no need to install it again return {'name': name, 'changes': {}, 'result': True, 'comment': 'Advisory patch {0} is already ' 'installed'.format(name)} # Find out which advisory patches will be targeted in the call to pkg.install targets = [] for patch_name in to_download: cver = cur_patches.get(patch_name, {}) # Advisory patch not yet installed, so add to targets if not cver: targets.append(patch_name) continue if not targets: # All specified packages are already downloaded msg = ('All specified advisory patches are already installed') return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_remove_targets(name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.removed and discover what packages need to be removed. Return a dict of packages to remove. ''' if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if pkgs: to_remove = _repack_pkgs(pkgs, normalize=normalize) if not to_remove: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. 
See ' 'minion log.'} else: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_remove = {_normalize_name(name): version} version_spec = False # Find out which packages will be targeted in the call to pkg.remove # Check current versions against specified versions targets = [] problems = [] for pkgname, pkgver in six.iteritems(to_remove): # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names origin = bool(re.search('/', pkgname)) if __grains__['os'] == 'FreeBSD' and origin: cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == pkgname] else: cver = cur_pkgs.get(pkgname, []) # Package not installed, no need to remove if not cver: continue # No version specified and pkg is installed elif __salt__['pkg_resource.version_clean'](pkgver) is None: targets.append(pkgname) continue version_spec = True try: if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch): targets.append(pkgname) else: log.debug( 'Current version (%s) did not match desired version ' 'specification (%s), will not remove', cver, pkgver ) except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already absent msg = 'All specified packages{0} are already absent'.format( ' (matching specified versions)' if version_spec else '' ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_install_targets(name=None, version=None, pkgs=None, sources=None, skip_suggestions=False, pkg_verify=False, normalize=True, ignore_epoch=False, reinstall=False, refresh=False, **kwargs): ''' Inspect the arguments to pkg.installed and discover what packages need to be installed. 
Return a dict of desired packages ''' was_refreshed = False if all((pkgs, sources)): return {'name': name, 'changes': {}, 'result': False, 'comment': 'Only one of "pkgs" and "sources" is permitted.'} # dict for packages that fail pkg.verify and their altered files altered_files = {} # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True if salt.utils.platform.is_windows(): # Windows requires a refresh to establish a pkg db if refresh=True, so # add it to the kwargs. 
kwargs['refresh'] = refresh resolve_capabilities = kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__ try: cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) cur_prov = resolve_capabilities and __salt__['pkg.list_provides'](**kwargs) or dict() except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': exc.strerror} if salt.utils.platform.is_windows() and kwargs.pop('refresh', False): # We already refreshed when we called pkg.list_pkgs was_refreshed = True refresh = False if any((pkgs, sources)): if pkgs: desired = _repack_pkgs(pkgs, normalize=normalize) elif sources: desired = __salt__['pkg_resource.pack_sources']( sources, normalize=normalize, ) if not desired: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted \'{0}\' parameter. See ' 'minion log.'.format('pkgs' if pkgs else 'sources')} to_unpurge = _find_unpurge_targets(desired, **kwargs) else: if salt.utils.platform.is_windows(): pkginfo = _get_package_info(name, saltenv=kwargs['saltenv']) if not pkginfo: return {'name': name, 'changes': {}, 'result': False, 'comment': 'Package {0} not found in the ' 'repository.'.format(name)} if version is None: version = _get_latest_pkg_version(pkginfo) if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) desired = {_normalize_name(name): version} else: desired = {name: version} to_unpurge = _find_unpurge_targets(desired, **kwargs) # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names origin = bool(re.search('/', name)) if __grains__['os'] == 'FreeBSD' and origin: cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == name] else: cver = cur_pkgs.get(name, []) if name not in to_unpurge: if version and version in cver \ and not reinstall \ and not pkg_verify: # The package is installed and is the correct version return {'name': name, 'changes': {}, 'result': True, 
'comment': 'Version {0} of package \'{1}\' is already ' 'installed'.format(version, name)} # if cver is not an empty string, the package is already installed elif cver and version is None \ and not reinstall \ and not pkg_verify: # The package is installed return {'name': name, 'changes': {}, 'result': True, 'comment': 'Package {0} is already ' 'installed'.format(name)} version_spec = False if not sources: # Check for alternate package names if strict processing is not # enforced. Takes extra time. Disable for improved performance if not skip_suggestions: # Perform platform-specific pre-flight checks not_installed = dict([ (name, version) for name, version in desired.items() if not (name in cur_pkgs and (version is None or _fulfills_version_string(cur_pkgs[name], version))) ]) if not_installed: try: problems = _preflight_check(not_installed, **kwargs) except CommandExecutionError: pass else: comments = [] if problems.get('no_suggest'): comments.append( 'The following package(s) were not found, and no ' 'possible matches were found in the package db: ' '{0}'.format( ', '.join(sorted(problems['no_suggest'])) ) ) if problems.get('suggest'): for pkgname, suggestions in \ six.iteritems(problems['suggest']): comments.append( 'Package \'{0}\' not found (possible matches: ' '{1})'.format(pkgname, ', '.join(suggestions)) ) if comments: if len(comments) > 1: comments.append('') return {'name': name, 'changes': {}, 'result': False, 'comment': '. '.join(comments).rstrip()} # Resolve the latest package version for any packages with "latest" in the # package version wants_latest = [] \ if sources \ else [x for x, y in six.iteritems(desired) if y == 'latest'] if wants_latest: resolved_latest = __salt__['pkg.latest_version'](*wants_latest, refresh=refresh, **kwargs) if len(wants_latest) == 1: resolved_latest = {wants_latest[0]: resolved_latest} if refresh: was_refreshed = True refresh = False # pkg.latest_version returns an empty string when the package is # up-to-date. 
So check the currently-installed packages. If found, the # resolved latest version will be the currently installed one from # cur_pkgs. If not found, then the package doesn't exist and the # resolved latest version will be None. for key in resolved_latest: if not resolved_latest[key]: if key in cur_pkgs: resolved_latest[key] = cur_pkgs[key][-1] else: resolved_latest[key] = None # Update the desired versions with the ones we resolved desired.update(resolved_latest) # Find out which packages will be targeted in the call to pkg.install targets = {} to_reinstall = {} problems = [] warnings = [] failed_verify = False for package_name, version_string in six.iteritems(desired): cver = cur_pkgs.get(package_name, []) if resolve_capabilities and not cver and package_name in cur_prov: cver = cur_pkgs.get(cur_prov.get(package_name)[0], []) # Package not yet installed, so add to targets if not cver: targets[package_name] = version_string continue if sources: if reinstall: to_reinstall[package_name] = version_string continue elif 'lowpkg.bin_pkg_info' not in __salt__: continue # Metadata parser is available, cache the file and derive the # package's name and version err = 'Unable to cache {0}: {1}' try: cached_path = __salt__['cp.cache_file'](version_string, saltenv=kwargs['saltenv']) except CommandExecutionError as exc: problems.append(err.format(version_string, exc)) continue if not cached_path: problems.append(err.format(version_string, 'file not found')) continue elif not os.path.exists(cached_path): problems.append('{0} does not exist on minion'.format(version_string)) continue source_info = __salt__['lowpkg.bin_pkg_info'](cached_path) if source_info is None: warnings.append('Failed to parse metadata for {0}'.format(version_string)) continue else: verstr = source_info['version'] else: verstr = version_string if reinstall: to_reinstall[package_name] = version_string continue if not __salt__['pkg_resource.check_extra_requirements'](package_name, version_string): 
targets[package_name] = version_string continue # No version specified and pkg is installed elif __salt__['pkg_resource.version_clean'](version_string) is None: if (not reinstall) and pkg_verify: try: verify_result = __salt__['pkg.verify']( package_name, ignore_types=ignore_types, verify_options=verify_options, **kwargs ) except (CommandExecutionError, SaltInvocationError) as exc: failed_verify = exc.strerror continue if verify_result: to_reinstall[package_name] = version_string altered_files[package_name] = verify_result continue version_fulfilled = False allow_updates = bool(not sources and kwargs.get('allow_updates')) try: version_fulfilled = _fulfills_version_string(cver, verstr, ignore_epoch=ignore_epoch, allow_updates=allow_updates) except CommandExecutionError as exc: problems.append(exc.strerror) continue # Compare desired version against installed version. version_spec = True if not version_fulfilled: if reinstall: to_reinstall[package_name] = version_string else: version_conditions = _parse_version_string(version_string) if pkg_verify and any(oper == '==' for oper, version in version_conditions): try: verify_result = __salt__['pkg.verify']( package_name, ignore_types=ignore_types, verify_options=verify_options, **kwargs ) except (CommandExecutionError, SaltInvocationError) as exc: failed_verify = exc.strerror continue if verify_result: to_reinstall[package_name] = version_string altered_files[package_name] = verify_result else: log.debug( 'Current version (%s) did not match desired version ' 'specification (%s), adding to installation targets', cver, version_string ) targets[package_name] = version_string if failed_verify: problems.append(failed_verify) if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not any((targets, to_unpurge, to_reinstall)): # All specified packages are installed msg = 'All specified packages are already installed{0}' msg = msg.format( ' and are at the desired version' if 
            version_spec and not sources else ''
        )
        ret = {'name': name,
               'changes': {},
               'result': True,
               'comment': msg}
        if warnings:
            ret.setdefault('warnings', []).extend(warnings)
        return ret

    # Tuple consumed by the calling state function: the desired package map,
    # install targets, packages to unpurge, packages to reinstall, files
    # altered per pkg.verify, accumulated warnings, and whether a repo
    # refresh already happened during target discovery.
    return (desired, targets, to_unpurge, to_reinstall, altered_files,
            warnings, was_refreshed)


def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None):
    '''
    Determine whether or not the installed packages match what was requested
    in the SLS file.

    Returns a two-tuple of lists: (package names verified OK, package names
    which failed verification).
    '''
    ok = []
    failed = []
    if not new_caps:
        new_caps = dict()
    for pkgname, pkgver in desired.items():
        # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names.
        # Homebrew for Mac OSX does something similar with tap names
        # prefixing package names, separated with a slash.
        has_origin = '/' in pkgname

        if __grains__['os'] == 'FreeBSD' and has_origin:
            cver = [k for k, v in six.iteritems(new_pkgs) if v['origin'] == pkgname]
        elif __grains__['os'] == 'MacOS' and has_origin:
            cver = new_pkgs.get(pkgname, new_pkgs.get(pkgname.split('/')[-1]))
        elif __grains__['os'] == 'OpenBSD':
            cver = new_pkgs.get(pkgname.split('%')[0])
        elif __grains__['os_family'] == 'Debian':
            cver = new_pkgs.get(pkgname.split('=')[0])
        else:
            cver = new_pkgs.get(pkgname)

        # Fall back to capability resolution when the requested name is not
        # installed itself but a package providing it is.
        if not cver and pkgname in new_caps:
            cver = new_pkgs.get(new_caps.get(pkgname)[0])

        if not cver:
            failed.append(pkgname)
            continue
        elif pkgver == 'latest':
            ok.append(pkgname)
            continue
        elif not __salt__['pkg_resource.version_clean'](pkgver):
            # No specific version requested, so any installed version passes
            ok.append(pkgname)
            continue
        elif pkgver.endswith("*") and cver[0].startswith(pkgver[:-1]):
            # Glob match on the version string (e.g. '1.2*')
            ok.append(pkgname)
            continue
        if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch):
            ok.append(pkgname)
        else:
            failed.append(pkgname)
    return ok, failed


def _get_desired_pkg(name, desired):
    '''
    Helper function that retrieves and nicely formats the desired pkg (and
    version if specified) so that helpful information can be printed in the
    comment for the state.
    '''
    if not desired[name] or desired[name].startswith(('<', '>', '=')):
        # Either no version was requested, or the version string already
        # carries its own comparison operator; don't prepend an '='.
        oper = ''
    else:
        oper = '='
    return '{0}{1}{2}'.format(name, oper,
                              '' if not desired[name] else desired[name])


def _preflight_check(desired, fromrepo, **kwargs):
    '''
    Perform platform-specific checks on desired packages

    Returns a dict with 'suggest' (name -> suggested alternatives) and
    'no_suggest' (names with no match at all) keys, or an empty dict when
    the platform has no pkg.check_db function.
    '''
    # NOTE(review): callers invoke this as _preflight_check(desired, **kwargs)
    # and appear to rely on 'fromrepo' being present in kwargs -- confirm
    # against the calling states.
    if 'pkg.check_db' not in __salt__:
        # Platform's pkg module has no package-db check; nothing to verify
        return {}
    ret = {'suggest': {}, 'no_suggest': []}
    pkginfo = __salt__['pkg.check_db'](
        *list(desired.keys()), fromrepo=fromrepo, **kwargs
    )
    for pkgname in pkginfo:
        if pkginfo[pkgname]['found'] is False:
            # Package not found in the package db; record any suggested
            # alternative names so they can be shown in the state comment.
            if pkginfo[pkgname]['suggestions']:
                ret['suggest'][pkgname] = pkginfo[pkgname]['suggestions']
            else:
                ret['no_suggest'].append(pkgname)
    return ret


def _nested_output(obj):
    '''
    Serialize obj and format for output
    '''
    # The nested outputter reads its configuration from a module-level
    # __opts__, which must be set before calling output().
    nested.__opts__ = __opts__
    ret = nested.output(obj).rstrip()
    return ret


def _resolve_capabilities(pkgs, refresh=False, **kwargs):
    '''
    Resolve capabilities in ``pkgs`` and exchange them with real package
    names, when the result is distinct.

    This feature can be turned on while setting the parameter
    ``resolve_capabilities`` to True.

    Return the input dictionary with replaced capability names and as second
    return value a bool which say if a refresh need to be run.

    In case of ``resolve_capabilities`` is False (disabled) or not supported
    by the implementation the input is returned unchanged.
    '''
    if not pkgs or 'pkg.resolve_capabilities' not in __salt__:
        return pkgs, refresh

    ret = __salt__['pkg.resolve_capabilities'](pkgs, refresh=refresh, **kwargs)
    # pkg.resolve_capabilities performed any needed refresh itself, so no
    # further refresh is required after this point.
    return ret, False


def downloaded(name,
               version=None,
               pkgs=None,
               fromrepo=None,
               ignore_epoch=None,
               **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that the package is downloaded, and that it is the correct version
    (if specified).

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    :param str name:
        The name of the package to be downloaded. This parameter is ignored if
        either "pkgs" is used.
Additionally, please note that this option can only be used to download packages from a software repository. :param str version: Download a specific version of a package. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.downloaded: - version: 2:7.4.160-1.el7 An **ignore_epoch** argument has been added to which causes the epoch to be disregarded when the state checks to see if the desired version was installed. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.downloaded: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: yaml zsh: pkg.downloaded: - version: 5.0.5-4.63 - fromrepo: "myrepository" ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} if 'pkg.list_downloaded' not in __salt__: ret['result'] = False ret['comment'] = 'The pkg.downloaded state is not available on ' \ 'this platform' return ret if not pkgs and isinstance(pkgs, list): ret['result'] = True ret['comment'] = 'No packages to download provided' return ret # If just a name (and optionally a version) is passed, just pack them into # the pkgs argument. if name and not pkgs: if version: pkgs = [{name: version}] version = None else: pkgs = [name] # It doesn't make sense here to received 'downloadonly' as kwargs # as we're explicitly passing 'downloadonly=True' to execution module. 
if 'downloadonly' in kwargs: del kwargs['downloadonly'] pkgs, _refresh = _resolve_capabilities(pkgs, **kwargs) # Only downloading not yet downloaded packages targets = _find_download_targets(name, version, pkgs, fromrepo=fromrepo, ignore_epoch=ignore_epoch, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, dict): ret['result'] = False ret['comment'] = 'An error was encountered while checking targets: ' \ '{0}'.format(targets) return ret if __opts__['test']: summary = ', '.join(targets) ret['comment'] = 'The following packages would be ' \ 'downloaded: {0}'.format(summary) return ret try: pkg_ret = __salt__['pkg.install'](name=name, pkgs=pkgs, version=version, downloadonly=True, fromrepo=fromrepo, ignore_epoch=ignore_epoch, **kwargs) ret['result'] = True ret['changes'].update(pkg_ret) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = 'An error was encountered while downloading ' \ 'package(s): {0}'.format(exc) return ret new_pkgs = __salt__['pkg.list_downloaded'](**kwargs) ok, failed = _verify_install(targets, new_pkgs, ignore_epoch=ignore_epoch) if failed: summary = ', '.join([_get_desired_pkg(x, targets) for x in failed]) ret['result'] = False ret['comment'] = 'The following packages failed to ' \ 'download: {0}'.format(summary) if not ret['changes'] and not ret['comment']: ret['result'] = True ret['comment'] = 'Packages are already downloaded: ' \ '{0}'.format(', '.join(targets)) return ret def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs): ''' .. versionadded:: 2017.7.0 Ensure that packages related to certain advisory ids are installed. 
Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` CLI Example: .. code-block:: yaml issue-foo-fixed: pkg.patch_installed: - advisory_ids: - SUSE-SLE-SERVER-12-SP2-2017-185 - SUSE-SLE-SERVER-12-SP2-2017-150 - SUSE-SLE-SERVER-12-SP2-2017-120 ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} if 'pkg.list_patches' not in __salt__: ret['result'] = False ret['comment'] = 'The pkg.patch_installed state is not available on ' \ 'this platform' return ret if not advisory_ids and isinstance(advisory_ids, list): ret['result'] = True ret['comment'] = 'No advisory ids provided' return ret # Only downloading not yet downloaded packages targets = _find_advisory_targets(name, advisory_ids, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, list): ret['result'] = False ret['comment'] = 'An error was encountered while checking targets: ' \ '{0}'.format(targets) return ret if __opts__['test']: summary = ', '.join(targets) ret['comment'] = 'The following advisory patches would be ' \ 'downloaded: {0}'.format(summary) return ret try: pkg_ret = __salt__['pkg.install'](name=name, advisory_ids=advisory_ids, downloadonly=downloadonly, **kwargs) ret['result'] = True ret['changes'].update(pkg_ret) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while downloading ' 'package(s): {0}'.format(exc)) return ret if not ret['changes'] and not ret['comment']: status = 'downloaded' if downloadonly else 'installed' ret['result'] = True ret['comment'] = ('Advisory patch is not needed or related packages ' 'are already {0}'.format(status)) return ret def patch_downloaded(name, advisory_ids=None, **kwargs): ''' .. versionadded:: 2017.7.0 Ensure that packages related to certain advisory ids are downloaded. Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` CLI Example: .. code-block:: yaml preparing-to-fix-issues: pkg.patch_downloaded: - advisory_ids: - SUSE-SLE-SERVER-12-SP2-2017-185 - SUSE-SLE-SERVER-12-SP2-2017-150 - SUSE-SLE-SERVER-12-SP2-2017-120 ''' if 'pkg.list_patches' not in __salt__: return {'name': name, 'result': False, 'changes': {}, 'comment': 'The pkg.patch_downloaded state is not available on ' 'this platform'} # It doesn't make sense here to received 'downloadonly' as kwargs # as we're explicitly passing 'downloadonly=True' to execution module. if 'downloadonly' in kwargs: del kwargs['downloadonly'] return patch_installed(name=name, advisory_ids=advisory_ids, downloadonly=True, **kwargs) def latest( name, refresh=None, fromrepo=None, skip_verify=False, pkgs=None, watch_flags=True, **kwargs): ''' Ensure that the named package is installed and the latest available package. If the package can be updated, this state function will update the package. Generally it is better for the :mod:`installed <salt.states.pkg.installed>` function to be used, as :mod:`latest <salt.states.pkg.latest>` will update the package whenever a new package is available. name The name of the package to maintain at the latest available version. This parameter is ignored if "pkgs" is used. 
fromrepo Specify a repository from which to install skip_verify Skip the GPG verification check for the package to be installed refresh This parameter controls whether or not the package repo database is updated prior to checking for the latest available version of the requested packages. If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before checking for the latest available version of the requested packages. If ``False``, the package database will *not* be refreshed before checking. If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.latest: - refresh: True - cache_valid_time: 300 In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 Multiple Package Installation Options: (Not yet supported for: FreeBSD, OpenBSD, MacOS, and Solaris pkgutil) pkgs A list of packages to maintain at the latest available version. .. 
code-block:: yaml mypkgs: pkg.latest: - pkgs: - foo - bar - baz install_recommends Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - install_recommends: False only_upgrade Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. report_reboot_exit_codes If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. code-block:: yaml ms vcpp installed: pkg.latest: - name: ms-vcpp - report_reboot_exit_codes: False ''' refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if kwargs.get('sources'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'The "sources" parameter is not supported.'} elif pkgs: desired_pkgs = list(_repack_pkgs(pkgs).keys()) if not desired_pkgs: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted "pkgs" parameter. See ' 'minion log.'} else: if not pkgs and isinstance(pkgs, list): return { 'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided' } else: desired_pkgs = [name] kwargs['saltenv'] = __env__ # check if capabilities should be checked and modify the requested packages # accordingly. 
desired_pkgs, refresh = _resolve_capabilities(desired_pkgs, refresh=refresh, **kwargs) try: avail = __salt__['pkg.latest_version'](*desired_pkgs, fromrepo=fromrepo, refresh=refresh, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking the ' 'newest available version of package(s): {0}' .format(exc)} try: cur = __salt__['pkg.version'](*desired_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': exc.strerror} # Repack the cur/avail data if only a single package is being checked if isinstance(cur, six.string_types): cur = {desired_pkgs[0]: cur} if isinstance(avail, six.string_types): avail = {desired_pkgs[0]: avail} targets = {} problems = [] for pkg in desired_pkgs: if not avail.get(pkg): # Package either a) is up-to-date, or b) does not exist if not cur.get(pkg): # Package does not exist msg = 'No information found for \'{0}\'.'.format(pkg) log.error(msg) problems.append(msg) elif watch_flags \ and __grains__.get('os_family') == 'Gentoo' \ and __salt__['portage_config.is_changed_uses'](pkg): # Package is up-to-date, but Gentoo USE flags are changing so # we need to add it to the targets targets[pkg] = cur[pkg] else: # Package either a) is not installed, or b) is installed and has an # upgrade available targets[pkg] = avail[pkg] if problems: return { 'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems) } if targets: # Find up-to-date packages if not pkgs: # There couldn't have been any up-to-date packages if this state # only targeted a single package and is being allowed to proceed to # the install step. 
up_to_date = [] else: up_to_date = [x for x in pkgs if x not in targets] if __opts__['test']: comments = [] comments.append( 'The following packages would be installed/upgraded: ' + ', '.join(sorted(targets)) ) if up_to_date: up_to_date_count = len(up_to_date) if up_to_date_count <= 10: comments.append( 'The following packages are already up-to-date: ' + ', '.join( ['{0} ({1})'.format(x, cur[x]) for x in sorted(up_to_date)] ) ) else: comments.append( '{0} packages are already up-to-date' .format(up_to_date_count) ) return {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comments)} if salt.utils.platform.is_windows(): # pkg.install execution module on windows ensures the software # package is installed when no version is specified, it does not # upgrade the software to the latest. This is per the design. # Build updated list of pkgs *with verion number*, exclude # non-targeted ones targeted_pkgs = [{x: targets[x]} for x in targets] else: # Build updated list of pkgs to exclude non-targeted ones targeted_pkgs = list(targets) # No need to refresh, if a refresh was necessary it would have been # performed above when pkg.latest_version was run. 
try: changes = __salt__['pkg.install'](name=None, refresh=False, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=targeted_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while installing ' 'package(s): {0}'.format(exc)} if changes: # Find failed and successful updates failed = [x for x in targets if not changes.get(x) or changes[x].get('new') != targets[x] and targets[x] != 'latest'] successful = [x for x in targets if x not in failed] comments = [] if failed: msg = 'The following packages failed to update: ' \ '{0}'.format(', '.join(sorted(failed))) comments.append(msg) if successful: msg = 'The following packages were successfully ' \ 'installed/upgraded: ' \ '{0}'.format(', '.join(sorted(successful))) comments.append(msg) if up_to_date: if len(up_to_date) <= 10: msg = 'The following packages were already up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: msg = '{0} packages were already up-to-date '.format( len(up_to_date)) comments.append(msg) return {'name': name, 'changes': changes, 'result': False if failed else True, 'comment': ' '.join(comments)} else: if len(targets) > 10: comment = ('{0} targeted packages failed to update. ' 'See debug log for details.'.format(len(targets))) elif len(targets) > 1: comment = ('The following targeted packages failed to update. ' 'See debug log for details: ({0}).' 
.format(', '.join(sorted(targets)))) else: comment = 'Package {0} failed to ' \ 'update.'.format(next(iter(list(targets.keys())))) if up_to_date: if len(up_to_date) <= 10: comment += ' The following packages were already ' \ 'up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: comment += '{0} packages were already ' \ 'up-to-date'.format(len(up_to_date)) return {'name': name, 'changes': changes, 'result': False, 'comment': comment} else: if len(desired_pkgs) > 10: comment = 'All {0} packages are up-to-date.'.format( len(desired_pkgs)) elif len(desired_pkgs) > 1: comment = 'All packages are up-to-date ' \ '({0}).'.format(', '.join(sorted(desired_pkgs))) else: comment = 'Package {0} is already ' \ 'up-to-date'.format(desired_pkgs[0]) return {'name': name, 'changes': {}, 'result': True, 'comment': comment} def _uninstall( action='remove', name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Common function for package removal ''' if action not in ('remove', 'purge'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalid action \'{0}\'. 
' 'This is probably a bug.'.format(action)} try: pkg_params = __salt__['pkg_resource.parse_targets']( name, pkgs, normalize=normalize)[0] except MinionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while parsing targets: ' '{0}'.format(exc)} targets = _find_remove_targets(name, version, pkgs, normalize, ignore_epoch=ignore_epoch, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, list): return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking targets: ' '{0}'.format(targets)} if action == 'purge': old_removed = __salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) targets.extend([x for x in pkg_params if x in old_removed]) targets.sort() if not targets: return {'name': name, 'changes': {}, 'result': True, 'comment': 'None of the targeted packages are installed' '{0}'.format(' or partially installed' if action == 'purge' else '')} if __opts__['test']: return {'name': name, 'changes': {}, 'result': None, 'comment': 'The following packages will be {0}d: ' '{1}.'.format(action, ', '.join(targets))} changes = __salt__['pkg.{0}'.format(action)](name, pkgs=pkgs, version=version, **kwargs) new = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) failed = [] for x in pkg_params: if __grains__['os_family'] in ['Suse', 'RedHat']: # Check if the package version set to be removed is actually removed: if x in new and not pkg_params[x]: failed.append(x) elif x in new and pkg_params[x] in new[x]: failed.append(x + "-" + pkg_params[x]) elif x in new: failed.append(x) if action == 'purge': new_removed = __salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) failed.extend([x for x in pkg_params if x in new_removed]) failed.sort() if failed: return {'name': name, 'changes': changes, 'result': False, 'comment': 'The following packages failed to {0}: ' '{1}.'.format(action, ', 
'.join(failed))} comments = [] not_installed = sorted([x for x in pkg_params if x not in targets]) if not_installed: comments.append('The following packages were not installed: ' '{0}'.format(', '.join(not_installed))) comments.append('The following packages were {0}d: ' '{1}.'.format(action, ', '.join(targets))) else: comments.append('All targeted packages were {0}d.'.format(action)) return {'name': name, 'changes': changes, 'result': True, 'comment': ' '.join(comments)} def removed(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.remove`` if necessary to remove the package. name The name of the package to be removed. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.removed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to remove. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='remove', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while removing ' 'package(s): {0}'.format(exc)) return ret def purged(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.purge`` if necessary to purge the package. 
All configuration files are also removed. name The name of the package to be purged. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. 
code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.purged: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to purge. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='purge', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while purging ' 'package(s): {0}'.format(exc)) return ret def uptodate(name, refresh=False, pkgs=None, **kwargs): ''' .. versionadded:: 2014.7.0 .. versionchanged:: 2018.3.0 Added support for the ``pkgin`` provider. Verify that the system is completely up to date. name The name has no functional value and is only used as a tracking reference refresh refresh the package database before checking for new upgrades pkgs list of packages to upgrade :param str cache_valid_time: This parameter sets the value in seconds after which cache marked as invalid, and cache update is necessary. This overwrite ``refresh`` parameter default behavior. In this case cache_valid_time is set, refresh will not take place for amount in seconds since last ``apt-get update`` executed on the system. .. 
note:: This parameter available only on Debian based distributions, and have no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 kwargs Any keyword arguments to pass through to ``pkg.upgrade``. .. versionadded:: 2015.5.0 ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'Failed to update'} if 'pkg.list_upgrades' not in __salt__: ret['comment'] = 'State pkg.uptodate is not available' return ret # emerge --update doesn't appear to support repo notation if 'fromrepo' in kwargs and __grains__['os_family'] == 'Gentoo': ret['comment'] = '\'fromrepo\' argument not supported on this platform' return ret if isinstance(refresh, bool): pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) try: packages = __salt__['pkg.list_upgrades'](refresh=refresh, **kwargs) expected = {pkgname: {'new': pkgver, 'old': __salt__['pkg.version'](pkgname, **kwargs)} for pkgname, pkgver in six.iteritems(packages)} if isinstance(pkgs, list): packages = [pkg for pkg in packages if pkg in pkgs] expected = {pkgname: pkgver for pkgname, pkgver in six.iteritems(expected) if pkgname in pkgs} except Exception as exc: ret['comment'] = six.text_type(exc) return ret else: ret['comment'] = 'refresh must be either True or False' return ret if not packages: ret['comment'] = 'System is already up-to-date' ret['result'] = True return ret elif __opts__['test']: ret['comment'] = 'System update will be performed' ret['changes'] = expected ret['result'] = None return ret try: ret['changes'] = __salt__['pkg.upgrade'](refresh=refresh, pkgs=pkgs, **kwargs) except CommandExecutionError as exc: if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while updating ' 'packages: {0}'.format(exc)) return ret # If a package list was provided, ensure those packages were updated missing = [] if isinstance(pkgs, list): missing = [pkg for pkg in six.iterkeys(expected) if pkg not in ret['changes']] if missing: ret['comment'] = 'The following package(s) failed to update: {0}'.format(', '.join(missing)) ret['result'] = False else: ret['comment'] = 'Upgrade ran successfully' ret['result'] = True return ret def group_installed(name, skip=None, include=None, **kwargs): ''' .. versionadded:: 2015.8.0 .. versionchanged:: 2016.11.0 Added support in :mod:`pacman <salt.modules.pacman>` Ensure that an entire package group is installed. This state is currently only supported for the :mod:`yum <salt.modules.yumpkg>` and :mod:`pacman <salt.modules.pacman>` package managers. skip Packages that would normally be installed by the package group ("default" packages), which should not be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - skip: - piranha include Packages which are included in a group, which would not normally be installed by a ``yum groupinstall`` ("optional" packages). Note that this will not enforce group membership; if you include packages which are not members of the specified groups, they will still be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - include: - haproxy .. versionchanged:: 2016.3.0 This option can no longer be passed as a comma-separated list, it must now be passed as a list (as shown in the above example). .. note:: Because this is essentially a wrapper around :py:func:`pkg.install <salt.modules.yumpkg.install>`, any argument which can be passed to pkg.install may also be included here, and it will be passed on to the call to :py:func:`pkg.install <salt.modules.yumpkg.install>`. 
''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} if 'pkg.group_diff' not in __salt__: ret['comment'] = 'pkg.group_install not available for this platform' return ret if skip is None: skip = [] else: if not isinstance(skip, list): ret['comment'] = 'skip must be formatted as a list' return ret for idx, item in enumerate(skip): if not isinstance(item, six.string_types): skip[idx] = six.text_type(item) if include is None: include = [] else: if not isinstance(include, list): ret['comment'] = 'include must be formatted as a list' return ret for idx, item in enumerate(include): if not isinstance(item, six.string_types): include[idx] = six.text_type(item) try: diff = __salt__['pkg.group_diff'](name) except CommandExecutionError as err: ret['comment'] = ('An error was encountered while installing/updating ' 'group \'{0}\': {1}.'.format(name, err)) return ret mandatory = diff['mandatory']['installed'] + \ diff['mandatory']['not installed'] invalid_skip = [x for x in mandatory if x in skip] if invalid_skip: ret['comment'] = ( 'The following mandatory packages cannot be skipped: {0}' .format(', '.join(invalid_skip)) ) return ret targets = diff['mandatory']['not installed'] targets.extend([x for x in diff['default']['not installed'] if x not in skip]) targets.extend(include) if not targets: ret['result'] = True ret['comment'] = 'Group \'{0}\' is already installed'.format(name) return ret partially_installed = diff['mandatory']['installed'] \ or diff['default']['installed'] \ or diff['optional']['installed'] if __opts__['test']: ret['result'] = None if partially_installed: ret['comment'] = ( 'Group \'{0}\' is partially installed and will be updated' .format(name) ) else: ret['comment'] = 'Group \'{0}\' will be installed'.format(name) return ret try: ret['changes'] = __salt__['pkg.install'](pkgs=targets, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while ' 'installing/updating group \'{0}\': {1}' .format(name, exc)) return ret failed = [x for x in targets if x not in __salt__['pkg.list_pkgs'](**kwargs)] if failed: ret['comment'] = ( 'Failed to install the following packages: {0}' .format(', '.join(failed)) ) return ret ret['result'] = True ret['comment'] = 'Group \'{0}\' was {1}'.format( name, 'updated' if partially_installed else 'installed' ) return ret def mod_init(low): ''' Set a flag to tell the install functions to refresh the package database. This ensures that the package database is refreshed only once during a state run significantly improving the speed of package management during a state run. It sets a flag for a number of reasons, primarily due to timeline logic. When originally setting up the mod_init for pkg a number of corner cases arose with different package managers and how they refresh package data. It also runs the "ex_mod_init" from the package manager module that is currently loaded. The "ex_mod_init" is expected to work as a normal "mod_init" function. .. 
seealso:: :py:func:`salt.modules.ebuild.ex_mod_init` ''' ret = True if 'pkg.ex_mod_init' in __salt__: ret = __salt__['pkg.ex_mod_init'](low) if low['fun'] == 'installed' or low['fun'] == 'latest': salt.utils.pkg.write_rtag(__opts__) return ret return False def mod_aggregate(low, chunks, running): ''' The mod_aggregate function which looks up all packages in the available low chunks and merges them into a single pkgs ref in the present low data ''' pkgs = [] pkg_type = None agg_enabled = [ 'installed', 'latest', 'removed', 'purged', ] if low.get('fun') not in agg_enabled: return low for chunk in chunks: tag = __utils__['state.gen_tag'](chunk) if tag in running: # Already ran the pkg state, skip aggregation continue if chunk.get('state') == 'pkg': if '__agg__' in chunk: continue # Check for the same function if chunk.get('fun') != low.get('fun'): continue # Check for the same repo if chunk.get('fromrepo') != low.get('fromrepo'): continue # Check first if 'sources' was passed so we don't aggregate pkgs # and sources together. if 'sources' in chunk: if pkg_type is None: pkg_type = 'sources' if pkg_type == 'sources': pkgs.extend(chunk['sources']) chunk['__agg__'] = True else: if pkg_type is None: pkg_type = 'pkgs' if pkg_type == 'pkgs': # Pull out the pkg names! if 'pkgs' in chunk: pkgs.extend(chunk['pkgs']) chunk['__agg__'] = True elif 'name' in chunk: version = chunk.pop('version', None) if version is not None: pkgs.append({chunk['name']: version}) else: pkgs.append(chunk['name']) chunk['__agg__'] = True if pkg_type is not None and pkgs: if pkg_type in low: low[pkg_type].extend(pkgs) else: low[pkg_type] = pkgs return low def mod_watch(name, **kwargs): ''' Install/reinstall a package based on a watch requisite .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered. 
''' sfun = kwargs.pop('sfun', None) mapfun = {'purged': purged, 'latest': latest, 'removed': removed, 'installed': installed} if sfun in mapfun: return mapfun[sfun](name, **kwargs) return {'name': name, 'changes': {}, 'comment': 'pkg.{0} does not work with the watch requisite'.format(sfun), 'result': False}
saltstack/salt
salt/states/pkg.py
downloaded
python
def downloaded(name, version=None, pkgs=None, fromrepo=None, ignore_epoch=None, **kwargs): ''' .. versionadded:: 2017.7.0 Ensure that the package is downloaded, and that it is the correct version (if specified). Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` :param str name: The name of the package to be downloaded. This parameter is ignored if either "pkgs" is used. Additionally, please note that this option can only be used to download packages from a software repository. :param str version: Download a specific version of a package. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.downloaded: - version: 2:7.4.160-1.el7 An **ignore_epoch** argument has been added to which causes the epoch to be disregarded when the state checks to see if the desired version was installed. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.downloaded: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 CLI Example: .. 
code-block:: yaml zsh: pkg.downloaded: - version: 5.0.5-4.63 - fromrepo: "myrepository" ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} if 'pkg.list_downloaded' not in __salt__: ret['result'] = False ret['comment'] = 'The pkg.downloaded state is not available on ' \ 'this platform' return ret if not pkgs and isinstance(pkgs, list): ret['result'] = True ret['comment'] = 'No packages to download provided' return ret # If just a name (and optionally a version) is passed, just pack them into # the pkgs argument. if name and not pkgs: if version: pkgs = [{name: version}] version = None else: pkgs = [name] # It doesn't make sense here to received 'downloadonly' as kwargs # as we're explicitly passing 'downloadonly=True' to execution module. if 'downloadonly' in kwargs: del kwargs['downloadonly'] pkgs, _refresh = _resolve_capabilities(pkgs, **kwargs) # Only downloading not yet downloaded packages targets = _find_download_targets(name, version, pkgs, fromrepo=fromrepo, ignore_epoch=ignore_epoch, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, dict): ret['result'] = False ret['comment'] = 'An error was encountered while checking targets: ' \ '{0}'.format(targets) return ret if __opts__['test']: summary = ', '.join(targets) ret['comment'] = 'The following packages would be ' \ 'downloaded: {0}'.format(summary) return ret try: pkg_ret = __salt__['pkg.install'](name=name, pkgs=pkgs, version=version, downloadonly=True, fromrepo=fromrepo, ignore_epoch=ignore_epoch, **kwargs) ret['result'] = True ret['changes'].update(pkg_ret) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = 'An error was encountered while downloading ' \ 'package(s): {0}'.format(exc) return ret new_pkgs = __salt__['pkg.list_downloaded'](**kwargs) ok, failed = _verify_install(targets, new_pkgs, ignore_epoch=ignore_epoch) if failed: summary = ', '.join([_get_desired_pkg(x, targets) for x in failed]) ret['result'] = False ret['comment'] = 'The following packages failed to ' \ 'download: {0}'.format(summary) if not ret['changes'] and not ret['comment']: ret['result'] = True ret['comment'] = 'Packages are already downloaded: ' \ '{0}'.format(', '.join(targets)) return ret
.. versionadded:: 2017.7.0 Ensure that the package is downloaded, and that it is the correct version (if specified). Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` :param str name: The name of the package to be downloaded. This parameter is ignored if either "pkgs" is used. Additionally, please note that this option can only be used to download packages from a software repository. :param str version: Download a specific version of a package. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.downloaded: - version: 2:7.4.160-1.el7 An **ignore_epoch** argument has been added to which causes the epoch to be disregarded when the state checks to see if the desired version was installed. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.downloaded: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: yaml zsh: pkg.downloaded: - version: 5.0.5-4.63 - fromrepo: "myrepository"
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pkg.py#L2026-L2181
[ "def _find_download_targets(name=None,\n version=None,\n pkgs=None,\n normalize=True,\n skip_suggestions=False,\n ignore_epoch=False,\n **kwargs):\n '''\n Inspect the arguments to pkg.downloaded and discover what packages need to\n be downloaded. Return a dict of packages to download.\n '''\n cur_pkgs = __salt__['pkg.list_downloaded'](**kwargs)\n if pkgs:\n to_download = _repack_pkgs(pkgs, normalize=normalize)\n\n if not to_download:\n # Badly-formatted SLS\n return {'name': name,\n 'changes': {},\n 'result': False,\n 'comment': 'Invalidly formatted pkgs parameter. See '\n 'minion log.'}\n else:\n if normalize:\n _normalize_name = \\\n __salt__.get('pkg.normalize_name', lambda pkgname: pkgname)\n to_download = {_normalize_name(name): version}\n else:\n to_download = {name: version}\n\n cver = cur_pkgs.get(name, {})\n if name in to_download:\n # Package already downloaded, no need to download again\n if cver and version in cver:\n return {'name': name,\n 'changes': {},\n 'result': True,\n 'comment': 'Version {0} of package \\'{1}\\' is already '\n 'downloaded'.format(version, name)}\n\n # if cver is not an empty string, the package is already downloaded\n elif cver and version is None:\n # The package is downloaded\n return {'name': name,\n 'changes': {},\n 'result': True,\n 'comment': 'Package {0} is already '\n 'downloaded'.format(name)}\n\n version_spec = False\n if not skip_suggestions:\n try:\n problems = _preflight_check(to_download, **kwargs)\n except CommandExecutionError:\n pass\n else:\n comments = []\n if problems.get('no_suggest'):\n comments.append(\n 'The following package(s) were not found, and no '\n 'possible matches were found in the package db: '\n '{0}'.format(\n ', '.join(sorted(problems['no_suggest']))\n )\n )\n if problems.get('suggest'):\n for pkgname, suggestions in \\\n six.iteritems(problems['suggest']):\n comments.append(\n 'Package \\'{0}\\' not found (possible matches: '\n '{1})'.format(pkgname, ', '.join(suggestions))\n )\n if 
comments:\n if len(comments) > 1:\n comments.append('')\n return {'name': name,\n 'changes': {},\n 'result': False,\n 'comment': '. '.join(comments).rstrip()}\n\n # Find out which packages will be targeted in the call to pkg.download\n # Check current downloaded versions against specified versions\n targets = {}\n problems = []\n for pkgname, pkgver in six.iteritems(to_download):\n cver = cur_pkgs.get(pkgname, {})\n # Package not yet downloaded, so add to targets\n if not cver:\n targets[pkgname] = pkgver\n continue\n # No version specified but package is already downloaded\n elif cver and not pkgver:\n continue\n\n version_spec = True\n try:\n if not _fulfills_version_string(cver.keys(), pkgver, ignore_epoch=ignore_epoch):\n targets[pkgname] = pkgver\n except CommandExecutionError as exc:\n problems.append(exc.strerror)\n continue\n\n if problems:\n return {'name': name,\n 'changes': {},\n 'result': False,\n 'comment': ' '.join(problems)}\n\n if not targets:\n # All specified packages are already downloaded\n msg = (\n 'All specified packages{0} are already downloaded'\n .format(' (matching specified versions)' if version_spec else '')\n )\n return {'name': name,\n 'changes': {},\n 'result': True,\n 'comment': msg}\n\n return targets\n", "def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None):\n '''\n Determine whether or not the installed packages match what was requested in\n the SLS file.\n '''\n ok = []\n failed = []\n if not new_caps:\n new_caps = dict()\n for pkgname, pkgver in desired.items():\n # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names.\n # Homebrew for Mac OSX does something similar with tap names\n # prefixing package names, separated with a slash.\n has_origin = '/' in pkgname\n\n if __grains__['os'] == 'FreeBSD' and has_origin:\n cver = [k for k, v in six.iteritems(new_pkgs) if v['origin'] == pkgname]\n elif __grains__['os'] == 'MacOS' and has_origin:\n cver = new_pkgs.get(pkgname, 
new_pkgs.get(pkgname.split('/')[-1]))\n elif __grains__['os'] == 'OpenBSD':\n cver = new_pkgs.get(pkgname.split('%')[0])\n elif __grains__['os_family'] == 'Debian':\n cver = new_pkgs.get(pkgname.split('=')[0])\n else:\n cver = new_pkgs.get(pkgname)\n if not cver and pkgname in new_caps:\n cver = new_pkgs.get(new_caps.get(pkgname)[0])\n\n if not cver:\n failed.append(pkgname)\n continue\n elif pkgver == 'latest':\n ok.append(pkgname)\n continue\n elif not __salt__['pkg_resource.version_clean'](pkgver):\n ok.append(pkgname)\n continue\n elif pkgver.endswith(\"*\") and cver[0].startswith(pkgver[:-1]):\n ok.append(pkgname)\n continue\n if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch):\n ok.append(pkgname)\n else:\n failed.append(pkgname)\n return ok, failed\n", "def _resolve_capabilities(pkgs, refresh=False, **kwargs):\n '''\n Resolve capabilities in ``pkgs`` and exchange them with real package\n names, when the result is distinct.\n This feature can be turned on while setting the paramter\n ``resolve_capabilities`` to True.\n\n Return the input dictionary with replaced capability names and as\n second return value a bool which say if a refresh need to be run.\n\n In case of ``resolve_capabilities`` is False (disabled) or not\n supported by the implementation the input is returned unchanged.\n '''\n if not pkgs or 'pkg.resolve_capabilities' not in __salt__:\n return pkgs, refresh\n\n ret = __salt__['pkg.resolve_capabilities'](pkgs, refresh=refresh, **kwargs)\n return ret, False\n" ]
# -*- coding: utf-8 -*- ''' Installation of packages using OS package managers such as yum or apt-get ========================================================================= .. note:: On minions running systemd>=205, as of version 2015.8.12, 2016.3.3, and 2016.11.0, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing the package manager commands spawned by Salt, when Salt updates itself (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.use_scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Salt can manage software packages via the pkg state module, packages can be set up to be installed, latest, removed and purged. Package management declarations are typically rather simple: .. code-block:: yaml vim: pkg.installed A more involved example involves pulling from a custom repository. .. code-block:: yaml base: pkgrepo.managed: - humanname: Logstash PPA - name: ppa:wolfnet/logstash - dist: precise - file: /etc/apt/sources.list.d/logstash.list - keyid: 28B04E4A - keyserver: keyserver.ubuntu.com logstash: pkg.installed: - fromrepo: ppa:wolfnet/logstash Multiple packages can also be installed with the use of the pkgs state module .. code-block:: yaml dotdeb.repo: pkgrepo.managed: - humanname: Dotdeb - name: deb http://packages.dotdeb.org wheezy-php55 all - dist: wheezy-php55 - file: /etc/apt/sources.list.d/dotbeb.list - keyid: 89DF5277 - keyserver: keys.gnupg.net - refresh_db: true php.packages: pkg.installed: - fromrepo: wheezy-php55 - pkgs: - php5-fpm - php5-cli - php5-curl .. 
warning:: Package names are currently case-sensitive. If the minion is using a package manager which is not case-sensitive (such as :mod:`pkgng <salt.modules.pkgng>`), then this state will fail if the proper case is not used. This will be addressed in a future release of Salt. ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import fnmatch import logging import os import re # Import Salt libs import salt.utils.pkg import salt.utils.platform import salt.utils.versions from salt.output import nested from salt.utils.functools import namespaced_function as _namespaced_function from salt.utils.odict import OrderedDict as _OrderedDict from salt.exceptions import ( CommandExecutionError, MinionError, SaltInvocationError ) from salt.modules.pkg_resource import _repack_pkgs # Import 3rd-party libs from salt.ext import six # pylint: disable=invalid-name _repack_pkgs = _namespaced_function(_repack_pkgs, globals()) if salt.utils.platform.is_windows(): # pylint: disable=import-error,no-name-in-module,unused-import from salt.ext.six.moves.urllib.parse import urlparse as _urlparse from salt.exceptions import SaltRenderError import collections import datetime import errno import time from functools import cmp_to_key # pylint: disable=import-error # pylint: enable=unused-import from salt.modules.win_pkg import _get_package_info from salt.modules.win_pkg import get_repo_data from salt.modules.win_pkg import _get_repo_details from salt.modules.win_pkg import _refresh_db_conditional from salt.modules.win_pkg import refresh_db from salt.modules.win_pkg import genrepo from salt.modules.win_pkg import _repo_process_pkg_sls from salt.modules.win_pkg import _get_latest_pkg_version from salt.modules.win_pkg import _reverse_cmp_pkg_versions _get_package_info = _namespaced_function(_get_package_info, globals()) get_repo_data = _namespaced_function(get_repo_data, globals()) _get_repo_details = \ _namespaced_function(_get_repo_details, globals()) 
_refresh_db_conditional = \ _namespaced_function(_refresh_db_conditional, globals()) refresh_db = _namespaced_function(refresh_db, globals()) genrepo = _namespaced_function(genrepo, globals()) _repo_process_pkg_sls = \ _namespaced_function(_repo_process_pkg_sls, globals()) _get_latest_pkg_version = \ _namespaced_function(_get_latest_pkg_version, globals()) _reverse_cmp_pkg_versions = \ _namespaced_function(_reverse_cmp_pkg_versions, globals()) # The following imports are used by the namespaced win_pkg funcs # and need to be included in their globals. # pylint: disable=import-error,unused-import from salt.utils.versions import LooseVersion # pylint: enable=import-error,unused-import # pylint: enable=invalid-name log = logging.getLogger(__name__) def __virtual__(): ''' Only make these states available if a pkg provider has been detected or assigned for this minion ''' return 'pkg.install' in __salt__ def _get_comparison_spec(pkgver): ''' Return a tuple containing the comparison operator and the version. If no comparison operator was passed, the comparison is assumed to be an "equals" comparison, and "==" will be the operator returned. ''' oper, verstr = salt.utils.pkg.split_comparison(pkgver.strip()) if oper in ('=', ''): oper = '==' return oper, verstr def _parse_version_string(version_conditions_string): ''' Returns a list of two-tuples containing (operator, version). ''' result = [] version_conditions_string = version_conditions_string.strip() if not version_conditions_string: return result for version_condition in version_conditions_string.split(','): operator_and_version = _get_comparison_spec(version_condition) result.append(operator_and_version) return result def _fulfills_version_string(installed_versions, version_conditions_string, ignore_epoch=False, allow_updates=False): ''' Returns True if any of the installed versions match the specified version conditions, otherwise returns False. 
installed_versions The installed versions version_conditions_string The string containing all version conditions. E.G. 1.2.3-4 >=1.2.3-4 >=1.2.3-4, <2.3.4-5 >=1.2.3-4, <2.3.4-5, !=1.2.4-1 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. allow_updates : False Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. (Only applicable if only one strict version condition is specified E.G. version: 2.0.6~ubuntu3) ''' version_conditions = _parse_version_string(version_conditions_string) for installed_version in installed_versions: fullfills_all = True for operator, version_string in version_conditions: if allow_updates and len(version_conditions) == 1 and operator == '==': operator = '>=' fullfills_all = fullfills_all and _fulfills_version_spec([installed_version], operator, version_string, ignore_epoch=ignore_epoch) if fullfills_all: return True return False def _fulfills_version_spec(versions, oper, desired_version, ignore_epoch=False): ''' Returns True if any of the installed versions match the specified version, otherwise returns False ''' cmp_func = __salt__.get('pkg.version_cmp') # stripping "with_origin" dict wrapper if salt.utils.platform.is_freebsd(): if isinstance(versions, dict) and 'version' in versions: versions = versions['version'] for ver in versions: if (oper == '==' and fnmatch.fnmatch(ver, desired_version)) \ or salt.utils.versions.compare(ver1=ver, oper=oper, ver2=desired_version, cmp_func=cmp_func, ignore_epoch=ignore_epoch): return True return False def _find_unpurge_targets(desired, **kwargs): ''' Find packages which are marked to be purged but can't yet be removed because they are dependencies for 
other installed packages. These are the packages which will need to be 'unpurged' because they are part of pkg.installed states. This really just applies to Debian-based Linuxes. ''' return [ x for x in desired if x in __salt__['pkg.list_pkgs'](purge_desired=True, **kwargs) ] def _find_download_targets(name=None, version=None, pkgs=None, normalize=True, skip_suggestions=False, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.downloaded and discover what packages need to be downloaded. Return a dict of packages to download. ''' cur_pkgs = __salt__['pkg.list_downloaded'](**kwargs) if pkgs: to_download = _repack_pkgs(pkgs, normalize=normalize) if not to_download: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. See ' 'minion log.'} else: if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_download = {_normalize_name(name): version} else: to_download = {name: version} cver = cur_pkgs.get(name, {}) if name in to_download: # Package already downloaded, no need to download again if cver and version in cver: return {'name': name, 'changes': {}, 'result': True, 'comment': 'Version {0} of package \'{1}\' is already ' 'downloaded'.format(version, name)} # if cver is not an empty string, the package is already downloaded elif cver and version is None: # The package is downloaded return {'name': name, 'changes': {}, 'result': True, 'comment': 'Package {0} is already ' 'downloaded'.format(name)} version_spec = False if not skip_suggestions: try: problems = _preflight_check(to_download, **kwargs) except CommandExecutionError: pass else: comments = [] if problems.get('no_suggest'): comments.append( 'The following package(s) were not found, and no ' 'possible matches were found in the package db: ' '{0}'.format( ', '.join(sorted(problems['no_suggest'])) ) ) if problems.get('suggest'): for pkgname, suggestions in \ six.iteritems(problems['suggest']): 
comments.append( 'Package \'{0}\' not found (possible matches: ' '{1})'.format(pkgname, ', '.join(suggestions)) ) if comments: if len(comments) > 1: comments.append('') return {'name': name, 'changes': {}, 'result': False, 'comment': '. '.join(comments).rstrip()} # Find out which packages will be targeted in the call to pkg.download # Check current downloaded versions against specified versions targets = {} problems = [] for pkgname, pkgver in six.iteritems(to_download): cver = cur_pkgs.get(pkgname, {}) # Package not yet downloaded, so add to targets if not cver: targets[pkgname] = pkgver continue # No version specified but package is already downloaded elif cver and not pkgver: continue version_spec = True try: if not _fulfills_version_string(cver.keys(), pkgver, ignore_epoch=ignore_epoch): targets[pkgname] = pkgver except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already downloaded msg = ( 'All specified packages{0} are already downloaded' .format(' (matching specified versions)' if version_spec else '') ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_advisory_targets(name=None, advisory_ids=None, **kwargs): ''' Inspect the arguments to pkg.patch_installed and discover what advisory patches need to be installed. Return a dict of advisory patches to install. 
''' cur_patches = __salt__['pkg.list_installed_patches'](**kwargs) if advisory_ids: to_download = advisory_ids else: to_download = [name] if cur_patches.get(name, {}): # Advisory patch already installed, no need to install it again return {'name': name, 'changes': {}, 'result': True, 'comment': 'Advisory patch {0} is already ' 'installed'.format(name)} # Find out which advisory patches will be targeted in the call to pkg.install targets = [] for patch_name in to_download: cver = cur_patches.get(patch_name, {}) # Advisory patch not yet installed, so add to targets if not cver: targets.append(patch_name) continue if not targets: # All specified packages are already downloaded msg = ('All specified advisory patches are already installed') return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_remove_targets(name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.removed and discover what packages need to be removed. Return a dict of packages to remove. ''' if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if pkgs: to_remove = _repack_pkgs(pkgs, normalize=normalize) if not to_remove: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. 
See ' 'minion log.'}
    else:
        _normalize_name = \
            __salt__.get('pkg.normalize_name', lambda pkgname: pkgname)
        to_remove = {_normalize_name(name): version}

    version_spec = False
    # Find out which packages will be targeted in the call to pkg.remove
    # Check current versions against specified versions
    targets = []
    problems = []
    for pkgname, pkgver in six.iteritems(to_remove):
        # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names
        origin = bool(re.search('/', pkgname))

        if __grains__['os'] == 'FreeBSD' and origin:
            cver = [k for k, v in six.iteritems(cur_pkgs)
                    if v['origin'] == pkgname]
        else:
            cver = cur_pkgs.get(pkgname, [])

        # Package not installed, no need to remove
        if not cver:
            continue
        # No version specified and pkg is installed
        elif __salt__['pkg_resource.version_clean'](pkgver) is None:
            targets.append(pkgname)
            continue
        version_spec = True
        try:
            if _fulfills_version_string(cver, pkgver,
                                        ignore_epoch=ignore_epoch):
                targets.append(pkgname)
            else:
                log.debug(
                    'Current version (%s) did not match desired version '
                    'specification (%s), will not remove',
                    cver, pkgver
                )
        except CommandExecutionError as exc:
            problems.append(exc.strerror)
            continue

    if problems:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': ' '.join(problems)}

    if not targets:
        # All specified packages are already absent
        msg = 'All specified packages{0} are already absent'.format(
            ' (matching specified versions)' if version_spec else ''
        )
        return {'name': name,
                'changes': {},
                'result': True,
                'comment': msg}

    return targets


def _find_install_targets(name=None,
                          version=None,
                          pkgs=None,
                          sources=None,
                          skip_suggestions=False,
                          pkg_verify=False,
                          normalize=True,
                          ignore_epoch=False,
                          reinstall=False,
                          refresh=False,
                          **kwargs):
    '''
    Inspect the arguments to pkg.installed and discover what packages need to
    be installed. Return a dict of desired packages

    On success the return value is the 7-tuple (desired, targets, to_unpurge,
    to_reinstall, altered_files, warnings, was_refreshed); when there is
    nothing to do, or an error occurs, a complete state return dict is
    returned instead.
    '''
    was_refreshed = False

    if all((pkgs, sources)):
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'Only one of "pkgs" and "sources" is permitted.'}

    # dict for packages that fail pkg.verify and their altered files
    altered_files = {}
    # Get the ignore_types list if any from the pkg_verify argument
    if isinstance(pkg_verify, list) \
            and any(x.get('ignore_types') is not None
                    for x in pkg_verify
                    if isinstance(x, _OrderedDict)
                    and 'ignore_types' in x):
        ignore_types = next(x.get('ignore_types')
                            for x in pkg_verify
                            if 'ignore_types' in x)
    else:
        ignore_types = []

    # Get the verify_options list if any from the pkg_verify argument
    if isinstance(pkg_verify, list) \
            and any(x.get('verify_options') is not None
                    for x in pkg_verify
                    if isinstance(x, _OrderedDict)
                    and 'verify_options' in x):
        verify_options = next(x.get('verify_options')
                              for x in pkg_verify
                              if 'verify_options' in x)
    else:
        verify_options = []

    if __grains__['os'] == 'FreeBSD':
        kwargs['with_origin'] = True

    if salt.utils.platform.is_windows():
        # Windows requires a refresh to establish a pkg db if refresh=True, so
        # add it to the kwargs.
        kwargs['refresh'] = refresh

    resolve_capabilities = kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__
    try:
        cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
        # NOTE(review): and/or idiom — falls back to dict() whenever
        # resolve_capabilities is falsy or list_provides returns falsy
        cur_prov = resolve_capabilities and __salt__['pkg.list_provides'](**kwargs) or dict()
    except CommandExecutionError as exc:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': exc.strerror}

    if salt.utils.platform.is_windows() and kwargs.pop('refresh', False):
        # We already refreshed when we called pkg.list_pkgs
        was_refreshed = True
        refresh = False

    if any((pkgs, sources)):
        if pkgs:
            desired = _repack_pkgs(pkgs, normalize=normalize)
        elif sources:
            desired = __salt__['pkg_resource.pack_sources'](
                sources,
                normalize=normalize,
            )

        if not desired:
            # Badly-formatted SLS
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'Invalidly formatted \'{0}\' parameter. See '
                               'minion log.'.format('pkgs' if pkgs
                                                    else 'sources')}
        to_unpurge = _find_unpurge_targets(desired, **kwargs)
    else:
        if salt.utils.platform.is_windows():
            # Windows pkg db lookup requires saltenv in kwargs
            pkginfo = _get_package_info(name, saltenv=kwargs['saltenv'])
            if not pkginfo:
                return {'name': name,
                        'changes': {},
                        'result': False,
                        'comment': 'Package {0} not found in the '
                                   'repository.'.format(name)}
            if version is None:
                version = _get_latest_pkg_version(pkginfo)

        if normalize:
            _normalize_name = \
                __salt__.get('pkg.normalize_name', lambda pkgname: pkgname)
            desired = {_normalize_name(name): version}
        else:
            desired = {name: version}

        to_unpurge = _find_unpurge_targets(desired, **kwargs)

        # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names
        origin = bool(re.search('/', name))

        if __grains__['os'] == 'FreeBSD' and origin:
            cver = [k for k, v in six.iteritems(cur_pkgs)
                    if v['origin'] == name]
        else:
            cver = cur_pkgs.get(name, [])

        if name not in to_unpurge:
            if version and version in cver \
                    and not reinstall \
                    and not pkg_verify:
                # The package is installed and is the correct version
                return {'name': name,
                        'changes': {},
                        'result': True,
                        'comment': 'Version {0} of package \'{1}\' is already '
                                   'installed'.format(version, name)}

            # if cver is not an empty string, the package is already installed
            elif cver and version is None \
                    and not reinstall \
                    and not pkg_verify:
                # The package is installed
                return {'name': name,
                        'changes': {},
                        'result': True,
                        'comment': 'Package {0} is already '
                                   'installed'.format(name)}

    version_spec = False
    if not sources:
        # Check for alternate package names if strict processing is not
        # enforced. Takes extra time. Disable for improved performance
        if not skip_suggestions:
            # Perform platform-specific pre-flight checks
            # NOTE(review): the comprehension's name/version shadow the
            # function arguments of the same names
            not_installed = dict([
                (name, version)
                for name, version in desired.items()
                if not (name in cur_pkgs and
                        (version is None or
                         _fulfills_version_string(cur_pkgs[name], version)))
            ])
            if not_installed:
                try:
                    problems = _preflight_check(not_installed, **kwargs)
                except CommandExecutionError:
                    pass
                else:
                    comments = []
                    if problems.get('no_suggest'):
                        comments.append(
                            'The following package(s) were not found, and no '
                            'possible matches were found in the package db: '
                            '{0}'.format(
                                ', '.join(sorted(problems['no_suggest']))
                            )
                        )
                    if problems.get('suggest'):
                        for pkgname, suggestions in \
                                six.iteritems(problems['suggest']):
                            comments.append(
                                'Package \'{0}\' not found (possible matches: '
                                '{1})'.format(pkgname, ', '.join(suggestions))
                            )
                    if comments:
                        if len(comments) > 1:
                            comments.append('')
                        return {'name': name,
                                'changes': {},
                                'result': False,
                                'comment': '. '.join(comments).rstrip()}

    # Resolve the latest package version for any packages with "latest" in the
    # package version
    wants_latest = [] \
        if sources \
        else [x for x, y in six.iteritems(desired) if y == 'latest']
    if wants_latest:
        resolved_latest = __salt__['pkg.latest_version'](*wants_latest,
                                                         refresh=refresh,
                                                         **kwargs)
        if len(wants_latest) == 1:
            resolved_latest = {wants_latest[0]: resolved_latest}
        if refresh:
            was_refreshed = True
            refresh = False

        # pkg.latest_version returns an empty string when the package is
        # up-to-date. So check the currently-installed packages. If found, the
        # resolved latest version will be the currently installed one from
        # cur_pkgs. If not found, then the package doesn't exist and the
        # resolved latest version will be None.
        for key in resolved_latest:
            if not resolved_latest[key]:
                if key in cur_pkgs:
                    resolved_latest[key] = cur_pkgs[key][-1]
                else:
                    resolved_latest[key] = None
        # Update the desired versions with the ones we resolved
        desired.update(resolved_latest)

    # Find out which packages will be targeted in the call to pkg.install
    targets = {}
    to_reinstall = {}
    problems = []
    warnings = []
    failed_verify = False
    for package_name, version_string in six.iteritems(desired):
        cver = cur_pkgs.get(package_name, [])
        if resolve_capabilities and not cver and package_name in cur_prov:
            # Package is not installed under its own name, but a package
            # providing this capability is — compare against that one
            cver = cur_pkgs.get(cur_prov.get(package_name)[0], [])

        # Package not yet installed, so add to targets
        if not cver:
            targets[package_name] = version_string
            continue

        if sources:
            if reinstall:
                to_reinstall[package_name] = version_string
                continue
            elif 'lowpkg.bin_pkg_info' not in __salt__:
                continue
            # Metadata parser is available, cache the file and derive the
            # package's name and version
            err = 'Unable to cache {0}: {1}'
            try:
                cached_path = __salt__['cp.cache_file'](
                    version_string,
                    saltenv=kwargs['saltenv'])
            except CommandExecutionError as exc:
                problems.append(err.format(version_string, exc))
                continue
            if not cached_path:
                problems.append(err.format(version_string, 'file not found'))
                continue
            elif not os.path.exists(cached_path):
                problems.append('{0} does not exist on minion'.format(version_string))
                continue
            source_info = __salt__['lowpkg.bin_pkg_info'](cached_path)
            if source_info is None:
                warnings.append('Failed to parse metadata for {0}'.format(version_string))
                continue
            else:
                verstr = source_info['version']
        else:
            verstr = version_string
            if reinstall:
                to_reinstall[package_name] = version_string
                continue
            if not __salt__['pkg_resource.check_extra_requirements'](package_name,
                                                                     version_string):
                targets[package_name] = version_string
                continue
            # No version specified and pkg is installed
            elif __salt__['pkg_resource.version_clean'](version_string) is None:
                if (not reinstall) and pkg_verify:
                    try:
                        verify_result = __salt__['pkg.verify'](
                            package_name,
                            ignore_types=ignore_types,
                            verify_options=verify_options,
                            **kwargs
                        )
                    except (CommandExecutionError, SaltInvocationError) as exc:
                        failed_verify = exc.strerror
                        continue
                    if verify_result:
                        to_reinstall[package_name] = version_string
                        altered_files[package_name] = verify_result
                continue

        version_fulfilled = False
        allow_updates = bool(not sources and kwargs.get('allow_updates'))
        try:
            version_fulfilled = _fulfills_version_string(cver,
                                                         verstr,
                                                         ignore_epoch=ignore_epoch,
                                                         allow_updates=allow_updates)
        except CommandExecutionError as exc:
            problems.append(exc.strerror)
            continue

        # Compare desired version against installed version.
        version_spec = True
        if not version_fulfilled:
            if reinstall:
                to_reinstall[package_name] = version_string
            else:
                version_conditions = _parse_version_string(version_string)
                if pkg_verify and any(oper == '==' for oper, version in version_conditions):
                    try:
                        verify_result = __salt__['pkg.verify'](
                            package_name,
                            ignore_types=ignore_types,
                            verify_options=verify_options,
                            **kwargs)
                    except (CommandExecutionError, SaltInvocationError) as exc:
                        failed_verify = exc.strerror
                        continue
                    if verify_result:
                        to_reinstall[package_name] = version_string
                        altered_files[package_name] = verify_result
                else:
                    log.debug(
                        'Current version (%s) did not match desired version '
                        'specification (%s), adding to installation targets',
                        cver, version_string
                    )
                    targets[package_name] = version_string

    if failed_verify:
        problems.append(failed_verify)

    if problems:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': ' '.join(problems)}

    if not any((targets, to_unpurge, to_reinstall)):
        # All specified packages are installed
        msg = 'All specified packages are already installed{0}'
        msg = msg.format(
            ' and are at the desired version' if version_spec and not sources
            else ''
        )
        ret = {'name': name,
               'changes': {},
               'result': True,
               'comment': msg}
        if warnings:
            ret.setdefault('warnings', []).extend(warnings)
        return ret

    return (desired, targets, to_unpurge, to_reinstall, altered_files,
            warnings, was_refreshed)


def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None):
    '''
    Determine whether or not the installed packages match what was requested in
    the SLS file.

    Returns a two-tuple (ok, failed) of package-name lists.
    '''
    ok = []
    failed = []
    if not new_caps:
        new_caps = dict()
    for pkgname, pkgver in desired.items():
        # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names.
        # Homebrew for Mac OSX does something similar with tap names
        # prefixing package names, separated with a slash.
        has_origin = '/' in pkgname

        if __grains__['os'] == 'FreeBSD' and has_origin:
            cver = [k for k, v in six.iteritems(new_pkgs) if v['origin'] == pkgname]
        elif __grains__['os'] == 'MacOS' and has_origin:
            cver = new_pkgs.get(pkgname, new_pkgs.get(pkgname.split('/')[-1]))
        elif __grains__['os'] == 'OpenBSD':
            cver = new_pkgs.get(pkgname.split('%')[0])
        elif __grains__['os_family'] == 'Debian':
            cver = new_pkgs.get(pkgname.split('=')[0])
        else:
            cver = new_pkgs.get(pkgname)
            if not cver and pkgname in new_caps:
                # Fall back to the package providing this capability
                cver = new_pkgs.get(new_caps.get(pkgname)[0])

        if not cver:
            failed.append(pkgname)
            continue
        elif pkgver == 'latest':
            ok.append(pkgname)
            continue
        elif not __salt__['pkg_resource.version_clean'](pkgver):
            ok.append(pkgname)
            continue
        elif pkgver.endswith("*") and cver[0].startswith(pkgver[:-1]):
            ok.append(pkgname)
            continue
        if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch):
            ok.append(pkgname)
        else:
            failed.append(pkgname)
    return ok, failed


def _get_desired_pkg(name, desired):
    '''
    Helper function that retrieves and nicely formats the desired pkg (and
    version if specified) so that helpful information can be printed in the
    comment for the state.
''' if not desired[name] or desired[name].startswith(('<', '>', '=')): oper = '' else: oper = '=' return '{0}{1}{2}'.format(name, oper, '' if not desired[name] else desired[name]) def _preflight_check(desired, fromrepo, **kwargs): ''' Perform platform-specific checks on desired packages ''' if 'pkg.check_db' not in __salt__: return {} ret = {'suggest': {}, 'no_suggest': []} pkginfo = __salt__['pkg.check_db']( *list(desired.keys()), fromrepo=fromrepo, **kwargs ) for pkgname in pkginfo: if pkginfo[pkgname]['found'] is False: if pkginfo[pkgname]['suggestions']: ret['suggest'][pkgname] = pkginfo[pkgname]['suggestions'] else: ret['no_suggest'].append(pkgname) return ret def _nested_output(obj): ''' Serialize obj and format for output ''' nested.__opts__ = __opts__ ret = nested.output(obj).rstrip() return ret def _resolve_capabilities(pkgs, refresh=False, **kwargs): ''' Resolve capabilities in ``pkgs`` and exchange them with real package names, when the result is distinct. This feature can be turned on while setting the paramter ``resolve_capabilities`` to True. Return the input dictionary with replaced capability names and as second return value a bool which say if a refresh need to be run. In case of ``resolve_capabilities`` is False (disabled) or not supported by the implementation the input is returned unchanged. ''' if not pkgs or 'pkg.resolve_capabilities' not in __salt__: return pkgs, refresh ret = __salt__['pkg.resolve_capabilities'](pkgs, refresh=refresh, **kwargs) return ret, False def installed( name, version=None, refresh=None, fromrepo=None, skip_verify=False, skip_suggestions=False, pkgs=None, sources=None, allow_updates=False, pkg_verify=False, normalize=True, ignore_epoch=False, reinstall=False, update_holds=False, bypass_file=None, bypass_file_contains=None, **kwargs): ''' Ensure that the package is installed, and that it is the correct version (if specified). :param str name: The name of the package to be installed. 
This parameter is ignored if either "pkgs" or "sources" is used. Additionally, please note that this option can only be used to install packages from a software repository. To install a package file manually, use the "sources" option detailed below. :param str version: Install a specific version of a package. This option is ignored if "sources" is used. Currently, this option is supported for the following pkg providers: :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`pkgin <salt.modules.pkgin>`, :mod:`win_pkg <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`. The version number includes the release designation where applicable, to allow Salt to target a specific release of a given version. When in doubt, using the ``pkg.latest_version`` function for an uninstalled package will tell you the version available. .. code-block:: bash # salt myminion pkg.latest_version vim-enhanced myminion: 2:7.4.160-1.el7 .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the ``pkg.latest_version`` output above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. 
Also, while this function is not yet implemented for all pkg frontends, :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` will show all versions available in the various repositories for a given package, irrespective of whether or not it is installed. .. code-block:: bash # salt myminion pkg.list_repo_pkgs bash myminion: ---------- bash: - 4.2.46-21.el7_3 - 4.2.46-20.el7_2 This function was first added for :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` in 2014.1.0, and was expanded to :py:func:`Debian/Ubuntu <salt.modules.aptpkg.list_repo_pkgs>` and :py:func:`Arch Linux <salt.modules.pacman.list_repo_pkgs>`-based distros in the 2017.7.0 release. The version strings returned by either of these functions can be used as version specifiers in pkg states. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.installed: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 If the version given is the string ``latest``, the latest available package version will be installed à la ``pkg.latest``. **WILDCARD VERSIONS** As of the 2017.7.0 release, this state now supports wildcards in package versions for SUSE SLES/Leap/Tumbleweed, Debian/Ubuntu, RHEL/CentOS, Arch Linux, and their derivatives. Using wildcards can be useful for packages where the release name is built into the version in some way, such as for RHEL/CentOS which typically has version numbers like ``1.2.34-5.el7``. An example of the usage for this would be: .. code-block:: yaml mypkg: pkg.installed: - version: '1.2.34*' Keep in mind that using wildcard versions will result in a slower state run since Salt must gather the available versions of the specified packages and figure out which of them match the specified wildcard expression. :param bool refresh: This parameter controls whether or not the package repo database is updated prior to installing the requested package(s). 
If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before installing. If ``False``, the package database will *not* be refreshed before installing. If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - cache_valid_time: 300 - allow_updates: True - hold: False In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param str fromrepo: Specify a repository from which to install .. note:: Distros which use APT (Debian, Ubuntu, etc.) do not have a concept of repositories, in the same way as YUM-based distros do. When a source is added, it is assigned to a given release. Consider the following source configuration: .. code-block:: text deb http://ppa.launchpad.net/saltstack/salt/ubuntu precise main The packages provided by this source would be made available via the ``precise`` release, therefore ``fromrepo`` would need to be set to ``precise`` for Salt to install the package from this source. 
Having multiple sources in the same release may result in the default install candidate being newer than what is desired. If this is the case, the desired version must be specified using the ``version`` parameter. If the ``pkgs`` parameter is being used to install multiple packages in the same state, then instead of using ``version``, use the method of version specification described in the **Multiple Package Installation Options** section below. Running the shell command ``apt-cache policy pkgname`` on a minion can help elucidate the APT configuration and aid in properly configuring states: .. code-block:: bash root@saltmaster:~# salt ubuntu01 cmd.run 'apt-cache policy ffmpeg' ubuntu01: ffmpeg: Installed: (none) Candidate: 7:0.10.11-1~precise1 Version table: 7:0.10.11-1~precise1 0 500 http://ppa.launchpad.net/jon-severinsson/ffmpeg/ubuntu/ precise/main amd64 Packages 4:0.8.10-0ubuntu0.12.04.1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise-updates/main amd64 Packages 500 http://security.ubuntu.com/ubuntu/ precise-security/main amd64 Packages 4:0.8.1-0ubuntu1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise/main amd64 Packages The release is located directly after the source's URL. The actual release name is the part before the slash, so to install version **4:0.8.10-0ubuntu0.12.04.1** either ``precise-updates`` or ``precise-security`` could be used for the ``fromrepo`` value. :param bool skip_verify: Skip the GPG verification check for the package to be installed :param bool skip_suggestions: Force strict package naming. Disables lookup of package alternatives. .. versionadded:: 2014.1.1 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 :param bool allow_updates: Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). 
This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - allow_updates: True - hold: False :param bool pkg_verify: .. versionadded:: 2014.7.0 For requested packages that are already installed and would not be targeted for upgrade or downgrade, use pkg.verify to determine if any of the files installed by the package have been altered. If files have been altered, the reinstall option of pkg.install is used to force a reinstall. Types to ignore can be passed to pkg.verify. Additionally, ``verify_options`` can be used to modify further the behavior of pkg.verify. See examples below. Currently, this option is supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>`. Examples: .. code-block:: yaml httpd: pkg.installed: - version: 2.2.15-30.el6.centos - pkg_verify: True .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc - verify_options: - nodeps - nofiledigest :param list ignore_types: List of types to ignore when verifying the package .. versionadded:: 2014.7.0 :param list verify_options: List of additional options to pass when verifying the package. These options will be added to the ``rpm -V`` command, prepended with ``--`` (for example, when ``nodeps`` is passed in this option, ``rpm -V`` will be run with ``--nodeps``). .. versionadded:: 2016.11.0 :param bool normalize: Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml gpfs.gplbin-2.6.32-279.31.1.el6.x86_64: pkg.installed: - normalize: False :param bool ignore_epoch: When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.installed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the package would be installed, but the state would report as failed because the actual installed version would be ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 | **MULTIPLE PACKAGE INSTALLATION OPTIONS: (not supported in pkgng)** :param list pkgs: A list of packages to install from a software repository. All packages listed under ``pkgs`` will be installed via a single command. .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar - baz - hold: True ``NOTE:`` For :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`winrepo <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`, version numbers can be specified in the ``pkgs`` argument. For example: .. 
code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz Additionally, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`zypper <salt.modules.zypper>`, :mod:`yum/dnf <salt.modules.yumpkg>`, and :mod:`apt <salt.modules.aptpkg>` support the ``<``, ``<=``, ``>=``, and ``>`` operators for more control over what versions will be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: '>=1.2.3-4' - baz ``NOTE:`` When using comparison operators, the expression must be enclosed in quotes to avoid a YAML render error. With :mod:`ebuild <salt.modules.ebuild>` is also possible to specify a use flag list and/or if the given packages should be in package.accept_keywords file and/or the overlay from which you want the package to be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo: '~' - bar: '~>=1.2:slot::overlay[use,-otheruse]' - baz :param list sources: A list of packages to install, along with the source URI or local path from which to install each package. In the example below, ``foo``, ``bar``, ``baz``, etc. refer to the name of the package, as it would appear in the output of the ``pkg.version`` or ``pkg.list_pkgs`` salt CLI commands. .. code-block:: yaml mypkgs: pkg.installed: - sources: - foo: salt://rpms/foo.rpm - bar: http://somesite.org/bar.rpm - baz: ftp://someothersite.org/baz.rpm - qux: /minion/path/to/qux.rpm **PLATFORM-SPECIFIC ARGUMENTS** These are specific to each OS. If it does not apply to the execution module for your OS, it is ignored. :param bool hold: Force the package to be held at the current installed version. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2014.7.0 :param bool update_holds: If ``True``, and this function would update the package version, any packages which are being held will be temporarily unheld so that they can be updated. 
Otherwise, if this function attempts to update a held package, the held package(s) will be skipped and the state will fail. By default, this parameter is set to ``False``. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2016.11.0 :param list names: A list of packages to install from a software repository. Each package will be installed individually by the package manager. .. warning:: Unlike ``pkgs``, the ``names`` parameter cannot specify a version. In addition, it makes a separate call to the package management frontend to install each package, whereas ``pkgs`` makes just a single call. It is therefore recommended to use ``pkgs`` instead of ``names`` to install multiple packages, both for the additional features and the performance improvement that it brings. :param bool install_recommends: Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - install_recommends: False :param bool only_upgrade: Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. :param bool report_reboot_exit_codes: If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. 
code-block:: yaml

            ms vcpp installed:
              pkg.installed:
                - name: ms-vcpp
                - version: 10.0.40219
                - report_reboot_exit_codes: False

    :param str bypass_file:
        If you wish to bypass the full package validation process, you can
        specify a file related to the installed package as a way to validate
        the package has already been installed. A good example would be a
        config file that is deployed with the package. Another bypass_file
        could be ``/run/salt-minion.pid``.

        .. code-block:: yaml

            install_ntp:
              pkg.installed:
                - name: ntp
                - bypass_file: /etc/ntp.conf

        The use case for this feature is when running salt at significant
        scale. Each state that has a requisite for a ``pkg.installed`` will
        have salt querying the package manager of the system. Compared to
        simple diff checks, querying the package manager is a lengthy process.
        This feature is an attempt to reduce the run time of states. If only a
        config change is being made but you wish to keep all of the self
        resolving requisites this bypasses the lengthy cost of the package
        manager. The assumption is that if this file is present, the package
        should already be installed.

    :param str bypass_file_contains:
        This option can only be used in conjunction with the ``bypass_file``
        option. It is to provide a second layer of validation before bypassing
        the ``pkg.installed`` process.

        .. code-block:: yaml

            install_ntp:
              pkg.installed:
                - name: ntp
                - bypass_file: /etc/ntp.conf
                - bypass_file_contains: version-20181218

        This will have salt check to see if the file contains the specified
        string. If the value is found, the ``pkg.installed`` process will be
        bypassed under the assumption that two pieces of validation have
        passed and the package is already installed.

        .. warning::
            Do not try and use ``{{ salt['pkg.version']('ntp') }}`` in a jinja
            template as part of your bypass_file_contains match. This will
            trigger a ``pkg.version`` lookup with the package manager and
            negate any time saved by trying to use the bypass feature.
:return: A dictionary containing the state of the software installation :rtype dict: .. note:: The ``pkg.installed`` state supports the usage of ``reload_modules``. This functionality allows you to force Salt to reload all modules. In many cases, Salt is clever enough to transparently reload the modules. For example, if you install a package, Salt reloads modules because some other module or state might require the package which was installed. However, there are some edge cases where this may not be the case, which is what ``reload_modules`` is meant to resolve. You should only use ``reload_modules`` if your ``pkg.installed`` does some sort of installation where if you do not reload the modules future items in your state which rely on the software being installed will fail. Please see the :ref:`Reloading Modules <reloading-modules>` documentation for more information. ''' if not pkgs and isinstance(pkgs, list): return {'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided'} # If just a name (and optionally a version) is passed, just pack them into # the pkgs argument. 
if name and not any((pkgs, sources)): if version: pkgs = [{name: version}] version = None else: pkgs = [name] kwargs['saltenv'] = __env__ refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if bypass_file is not None and bypass_file_contains is not None: if os.path.isfile(bypass_file): with salt.utils.fopen(bypass_file) as bypass_file_open: if bypass_file_contains in bypass_file_open.read(): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as {} was present in {}'.format(bypass_file_contains, bypass_file)} if bypass_file is not None and bypass_file_contains is None: if os.path.isfile(bypass_file): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as bypass_file {} was present'.format(bypass_file)} # check if capabilities should be checked and modify the requested packages # accordingly. if pkgs: pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) if not isinstance(pkg_verify, list): pkg_verify = pkg_verify is True if (pkg_verify or isinstance(pkg_verify, list)) \ and 'pkg.verify' not in __salt__: return {'name': name, 'changes': {}, 'result': False, 'comment': 'pkg.verify not implemented'} if not isinstance(version, six.string_types) and version is not None: version = six.text_type(version) kwargs['allow_updates'] = allow_updates result = _find_install_targets(name, version, pkgs, sources, fromrepo=fromrepo, skip_suggestions=skip_suggestions, pkg_verify=pkg_verify, normalize=normalize, ignore_epoch=ignore_epoch, reinstall=reinstall, refresh=refresh, **kwargs) try: (desired, targets, to_unpurge, to_reinstall, altered_files, warnings, was_refreshed) = result if was_refreshed: refresh = False except ValueError: # _find_install_targets() found no targets or encountered an error # check that the hold function is available if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( 
name=name, pkgs=pkgs, sources=sources ) except (CommandExecutionError, SaltInvocationError) as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': six.text_type(exc)} if 'result' in hold_ret and not hold_ret['result']: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] for i in modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] result['changes'][i['name']] = i['changes'] for i in not_modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] for i in failed_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] return result if to_unpurge and 'lowpkg.unpurge' not in __salt__: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'lowpkg.unpurge not implemented'} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret # Remove any targets not returned by _find_install_targets if pkgs: pkgs = [dict([(x, y)]) for x, y in six.iteritems(targets)] pkgs.extend([dict([(x, y)]) for x, y in six.iteritems(to_reinstall)]) elif sources: oldsources = sources sources = [x for x in oldsources if next(iter(list(x.keys()))) in targets] sources.extend([x for x in oldsources if next(iter(list(x.keys()))) in to_reinstall]) comment = [] if __opts__['test']: if targets: if sources: summary = ', '.join(targets) else: summary = ', '.join([_get_desired_pkg(x, targets) for x in targets]) comment.append('The following packages would be ' 'installed/updated: {0}'.format(summary)) if to_unpurge: comment.append( 'The following packages would have their selection 
status ' 'changed from \'purge\' to \'install\': {0}' .format(', '.join(to_unpurge)) ) if to_reinstall: # Add a comment for each package in to_reinstall with its # pkg.verify output if reinstall: reinstall_targets = [] for reinstall_pkg in to_reinstall: if sources: reinstall_targets.append(reinstall_pkg) else: reinstall_targets.append( _get_desired_pkg(reinstall_pkg, to_reinstall) ) msg = 'The following packages would be reinstalled: ' msg += ', '.join(reinstall_targets) comment.append(msg) else: for reinstall_pkg in to_reinstall: if sources: pkgstr = reinstall_pkg else: pkgstr = _get_desired_pkg(reinstall_pkg, to_reinstall) comment.append( 'Package \'{0}\' would be reinstalled because the ' 'following files have been altered:'.format(pkgstr) ) comment.append( _nested_output(altered_files[reinstall_pkg]) ) ret = {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret changes = {'installed': {}} modified_hold = None not_modified_hold = None failed_hold = None if targets or to_reinstall: try: pkg_ret = __salt__['pkg.install'](name=None, refresh=refresh, version=version, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=pkgs, sources=sources, reinstall=bool(to_reinstall), normalize=normalize, update_holds=update_holds, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while installing ' 'package(s): {0}'.format(exc)) if warnings: ret.setdefault('warnings', []).extend(warnings) return ret if refresh: refresh = False if isinstance(pkg_ret, dict): changes['installed'].update(pkg_ret) elif isinstance(pkg_ret, six.string_types): comment.append(pkg_ret) # Code below will be looking for a dictionary. If this is a string # it means that there was an exception raised and that no packages # changed, so now that we have added this error to the comments we # set this to an empty dictionary so that the code below which # checks reinstall targets works. pkg_ret = {} if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( name=name, pkgs=desired ) except (CommandExecutionError, SaltInvocationError) as exc: comment.append(six.text_type(exc)) ret = {'name': name, 'changes': changes, 'result': False, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: if 'result' in hold_ret and not hold_ret['result']: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] if to_unpurge: changes['purge_desired'] = __salt__['lowpkg.unpurge'](*to_unpurge) # Analyze pkg.install results for packages in targets if sources: modified = [x for x in changes['installed'] if x in targets] not_modified = [x for x in desired if x not in targets 
and x not in to_reinstall] failed = [x for x in targets if x not in modified] else: if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True new_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__: new_caps = __salt__['pkg.list_provides'](**kwargs) else: new_caps = {} ok, failed = _verify_install(desired, new_pkgs, ignore_epoch=ignore_epoch, new_caps=new_caps) modified = [x for x in ok if x in targets] not_modified = [x for x in ok if x not in targets and x not in to_reinstall] failed = [x for x in failed if x in targets] # If there was nothing unpurged, just set the changes dict to the contents # of changes['installed']. if not changes.get('purge_desired'): changes = changes['installed'] if modified: if sources: summary = ', '.join(modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in modified]) if len(summary) < 20: comment.append('The following packages were installed/updated: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} installed/updated.'.format( len(modified), 's' if len(modified) > 1 else '', 'were' if len(modified) > 1 else 'was' ) ) if modified_hold: for i in modified_hold: change_name = i['name'] if change_name in changes: comment.append(i['comment']) if changes[change_name]['new']: changes[change_name]['new'] += '\n' changes[change_name]['new'] += '{0}'.format(i['changes']['new']) if changes[change_name]['old']: changes[change_name]['old'] += '\n' changes[change_name]['old'] += '{0}'.format(i['changes']['old']) else: comment.append(i['comment']) changes[change_name] = {} changes[change_name]['new'] = '{0}'.format(i['changes']['new']) # Any requested packages that were not targeted for install or reinstall if not_modified: if sources: summary = ', '.join(not_modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in not_modified]) if len(not_modified) <= 20: comment.append('The following 
packages were already installed: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} already installed'.format( len(not_modified), 's' if len(not_modified) > 1 else '', 'were' if len(not_modified) > 1 else 'was' ) ) if not_modified_hold: for i in not_modified_hold: comment.append(i['comment']) result = True if failed: if sources: summary = ', '.join(failed) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in failed]) comment.insert(0, 'The following packages failed to ' 'install/update: {0}'.format(summary)) result = False if failed_hold: for i in failed_hold: comment.append(i['comment']) result = False # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] # Rerun pkg.verify for packages in to_reinstall to determine failed modified = [] failed = [] for reinstall_pkg in to_reinstall: if reinstall: if reinstall_pkg in pkg_ret: modified.append(reinstall_pkg) else: failed.append(reinstall_pkg) elif pkg_verify: # No need to wrap this in a try/except because we would already # have caught invalid arguments earlier. 
verify_result = __salt__['pkg.verify'](reinstall_pkg, ignore_types=ignore_types, verify_options=verify_options, **kwargs) if verify_result: failed.append(reinstall_pkg) altered_files[reinstall_pkg] = verify_result else: modified.append(reinstall_pkg) if modified: # Add a comment for each package in modified with its pkg.verify output for modified_pkg in modified: if sources: pkgstr = modified_pkg else: pkgstr = _get_desired_pkg(modified_pkg, desired) msg = 'Package {0} was reinstalled.'.format(pkgstr) if modified_pkg in altered_files: msg += ' The following files were remediated:' comment.append(msg) comment.append(_nested_output(altered_files[modified_pkg])) else: comment.append(msg) if failed: # Add a comment for each package in failed with its pkg.verify output for failed_pkg in failed: if sources: pkgstr = failed_pkg else: pkgstr = _get_desired_pkg(failed_pkg, desired) msg = ('Reinstall was not successful for package {0}.' .format(pkgstr)) if failed_pkg in altered_files: msg += ' The following files could not be remediated:' comment.append(msg) comment.append(_nested_output(altered_files[failed_pkg])) else: comment.append(msg) result = False ret = {'name': name, 'changes': changes, 'result': result, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs): ''' .. versionadded:: 2017.7.0 Ensure that packages related to certain advisory ids are installed. Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` CLI Example: .. 
code-block:: yaml issue-foo-fixed: pkg.patch_installed: - advisory_ids: - SUSE-SLE-SERVER-12-SP2-2017-185 - SUSE-SLE-SERVER-12-SP2-2017-150 - SUSE-SLE-SERVER-12-SP2-2017-120 ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} if 'pkg.list_patches' not in __salt__: ret['result'] = False ret['comment'] = 'The pkg.patch_installed state is not available on ' \ 'this platform' return ret if not advisory_ids and isinstance(advisory_ids, list): ret['result'] = True ret['comment'] = 'No advisory ids provided' return ret # Only downloading not yet downloaded packages targets = _find_advisory_targets(name, advisory_ids, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, list): ret['result'] = False ret['comment'] = 'An error was encountered while checking targets: ' \ '{0}'.format(targets) return ret if __opts__['test']: summary = ', '.join(targets) ret['comment'] = 'The following advisory patches would be ' \ 'downloaded: {0}'.format(summary) return ret try: pkg_ret = __salt__['pkg.install'](name=name, advisory_ids=advisory_ids, downloadonly=downloadonly, **kwargs) ret['result'] = True ret['changes'].update(pkg_ret) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while downloading ' 'package(s): {0}'.format(exc)) return ret if not ret['changes'] and not ret['comment']: status = 'downloaded' if downloadonly else 'installed' ret['result'] = True ret['comment'] = ('Advisory patch is not needed or related packages ' 'are already {0}'.format(status)) return ret def patch_downloaded(name, advisory_ids=None, **kwargs): ''' .. versionadded:: 2017.7.0 Ensure that packages related to certain advisory ids are downloaded. 
Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` CLI Example: .. code-block:: yaml preparing-to-fix-issues: pkg.patch_downloaded: - advisory_ids: - SUSE-SLE-SERVER-12-SP2-2017-185 - SUSE-SLE-SERVER-12-SP2-2017-150 - SUSE-SLE-SERVER-12-SP2-2017-120 ''' if 'pkg.list_patches' not in __salt__: return {'name': name, 'result': False, 'changes': {}, 'comment': 'The pkg.patch_downloaded state is not available on ' 'this platform'} # It doesn't make sense here to received 'downloadonly' as kwargs # as we're explicitly passing 'downloadonly=True' to execution module. if 'downloadonly' in kwargs: del kwargs['downloadonly'] return patch_installed(name=name, advisory_ids=advisory_ids, downloadonly=True, **kwargs) def latest( name, refresh=None, fromrepo=None, skip_verify=False, pkgs=None, watch_flags=True, **kwargs): ''' Ensure that the named package is installed and the latest available package. If the package can be updated, this state function will update the package. Generally it is better for the :mod:`installed <salt.states.pkg.installed>` function to be used, as :mod:`latest <salt.states.pkg.latest>` will update the package whenever a new package is available. name The name of the package to maintain at the latest available version. This parameter is ignored if "pkgs" is used. fromrepo Specify a repository from which to install skip_verify Skip the GPG verification check for the package to be installed refresh This parameter controls whether or not the package repo database is updated prior to checking for the latest available version of the requested packages. If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before checking for the latest available version of the requested packages. If ``False``, the package database will *not* be refreshed before checking. 
If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.latest: - refresh: True - cache_valid_time: 300 In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 Multiple Package Installation Options: (Not yet supported for: FreeBSD, OpenBSD, MacOS, and Solaris pkgutil) pkgs A list of packages to maintain at the latest available version. .. code-block:: yaml mypkgs: pkg.latest: - pkgs: - foo - bar - baz install_recommends Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - install_recommends: False only_upgrade Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. 
report_reboot_exit_codes If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. code-block:: yaml ms vcpp installed: pkg.latest: - name: ms-vcpp - report_reboot_exit_codes: False ''' refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if kwargs.get('sources'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'The "sources" parameter is not supported.'} elif pkgs: desired_pkgs = list(_repack_pkgs(pkgs).keys()) if not desired_pkgs: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted "pkgs" parameter. See ' 'minion log.'} else: if not pkgs and isinstance(pkgs, list): return { 'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided' } else: desired_pkgs = [name] kwargs['saltenv'] = __env__ # check if capabilities should be checked and modify the requested packages # accordingly. 
desired_pkgs, refresh = _resolve_capabilities(desired_pkgs, refresh=refresh, **kwargs) try: avail = __salt__['pkg.latest_version'](*desired_pkgs, fromrepo=fromrepo, refresh=refresh, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking the ' 'newest available version of package(s): {0}' .format(exc)} try: cur = __salt__['pkg.version'](*desired_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': exc.strerror} # Repack the cur/avail data if only a single package is being checked if isinstance(cur, six.string_types): cur = {desired_pkgs[0]: cur} if isinstance(avail, six.string_types): avail = {desired_pkgs[0]: avail} targets = {} problems = [] for pkg in desired_pkgs: if not avail.get(pkg): # Package either a) is up-to-date, or b) does not exist if not cur.get(pkg): # Package does not exist msg = 'No information found for \'{0}\'.'.format(pkg) log.error(msg) problems.append(msg) elif watch_flags \ and __grains__.get('os_family') == 'Gentoo' \ and __salt__['portage_config.is_changed_uses'](pkg): # Package is up-to-date, but Gentoo USE flags are changing so # we need to add it to the targets targets[pkg] = cur[pkg] else: # Package either a) is not installed, or b) is installed and has an # upgrade available targets[pkg] = avail[pkg] if problems: return { 'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems) } if targets: # Find up-to-date packages if not pkgs: # There couldn't have been any up-to-date packages if this state # only targeted a single package and is being allowed to proceed to # the install step. 
up_to_date = [] else: up_to_date = [x for x in pkgs if x not in targets] if __opts__['test']: comments = [] comments.append( 'The following packages would be installed/upgraded: ' + ', '.join(sorted(targets)) ) if up_to_date: up_to_date_count = len(up_to_date) if up_to_date_count <= 10: comments.append( 'The following packages are already up-to-date: ' + ', '.join( ['{0} ({1})'.format(x, cur[x]) for x in sorted(up_to_date)] ) ) else: comments.append( '{0} packages are already up-to-date' .format(up_to_date_count) ) return {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comments)} if salt.utils.platform.is_windows(): # pkg.install execution module on windows ensures the software # package is installed when no version is specified, it does not # upgrade the software to the latest. This is per the design. # Build updated list of pkgs *with verion number*, exclude # non-targeted ones targeted_pkgs = [{x: targets[x]} for x in targets] else: # Build updated list of pkgs to exclude non-targeted ones targeted_pkgs = list(targets) # No need to refresh, if a refresh was necessary it would have been # performed above when pkg.latest_version was run. 
try: changes = __salt__['pkg.install'](name=None, refresh=False, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=targeted_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while installing ' 'package(s): {0}'.format(exc)} if changes: # Find failed and successful updates failed = [x for x in targets if not changes.get(x) or changes[x].get('new') != targets[x] and targets[x] != 'latest'] successful = [x for x in targets if x not in failed] comments = [] if failed: msg = 'The following packages failed to update: ' \ '{0}'.format(', '.join(sorted(failed))) comments.append(msg) if successful: msg = 'The following packages were successfully ' \ 'installed/upgraded: ' \ '{0}'.format(', '.join(sorted(successful))) comments.append(msg) if up_to_date: if len(up_to_date) <= 10: msg = 'The following packages were already up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: msg = '{0} packages were already up-to-date '.format( len(up_to_date)) comments.append(msg) return {'name': name, 'changes': changes, 'result': False if failed else True, 'comment': ' '.join(comments)} else: if len(targets) > 10: comment = ('{0} targeted packages failed to update. ' 'See debug log for details.'.format(len(targets))) elif len(targets) > 1: comment = ('The following targeted packages failed to update. ' 'See debug log for details: ({0}).' 
.format(', '.join(sorted(targets)))) else: comment = 'Package {0} failed to ' \ 'update.'.format(next(iter(list(targets.keys())))) if up_to_date: if len(up_to_date) <= 10: comment += ' The following packages were already ' \ 'up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: comment += '{0} packages were already ' \ 'up-to-date'.format(len(up_to_date)) return {'name': name, 'changes': changes, 'result': False, 'comment': comment} else: if len(desired_pkgs) > 10: comment = 'All {0} packages are up-to-date.'.format( len(desired_pkgs)) elif len(desired_pkgs) > 1: comment = 'All packages are up-to-date ' \ '({0}).'.format(', '.join(sorted(desired_pkgs))) else: comment = 'Package {0} is already ' \ 'up-to-date'.format(desired_pkgs[0]) return {'name': name, 'changes': {}, 'result': True, 'comment': comment} def _uninstall( action='remove', name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Common function for package removal ''' if action not in ('remove', 'purge'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalid action \'{0}\'. 
' 'This is probably a bug.'.format(action)} try: pkg_params = __salt__['pkg_resource.parse_targets']( name, pkgs, normalize=normalize)[0] except MinionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while parsing targets: ' '{0}'.format(exc)} targets = _find_remove_targets(name, version, pkgs, normalize, ignore_epoch=ignore_epoch, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, list): return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking targets: ' '{0}'.format(targets)} if action == 'purge': old_removed = __salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) targets.extend([x for x in pkg_params if x in old_removed]) targets.sort() if not targets: return {'name': name, 'changes': {}, 'result': True, 'comment': 'None of the targeted packages are installed' '{0}'.format(' or partially installed' if action == 'purge' else '')} if __opts__['test']: return {'name': name, 'changes': {}, 'result': None, 'comment': 'The following packages will be {0}d: ' '{1}.'.format(action, ', '.join(targets))} changes = __salt__['pkg.{0}'.format(action)](name, pkgs=pkgs, version=version, **kwargs) new = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) failed = [] for x in pkg_params: if __grains__['os_family'] in ['Suse', 'RedHat']: # Check if the package version set to be removed is actually removed: if x in new and not pkg_params[x]: failed.append(x) elif x in new and pkg_params[x] in new[x]: failed.append(x + "-" + pkg_params[x]) elif x in new: failed.append(x) if action == 'purge': new_removed = __salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) failed.extend([x for x in pkg_params if x in new_removed]) failed.sort() if failed: return {'name': name, 'changes': changes, 'result': False, 'comment': 'The following packages failed to {0}: ' '{1}.'.format(action, ', 
'.join(failed))} comments = [] not_installed = sorted([x for x in pkg_params if x not in targets]) if not_installed: comments.append('The following packages were not installed: ' '{0}'.format(', '.join(not_installed))) comments.append('The following packages were {0}d: ' '{1}.'.format(action, ', '.join(targets))) else: comments.append('All targeted packages were {0}d.'.format(action)) return {'name': name, 'changes': changes, 'result': True, 'comment': ' '.join(comments)} def removed(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.remove`` if necessary to remove the package. name The name of the package to be removed. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.removed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to remove. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='remove', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while removing ' 'package(s): {0}'.format(exc)) return ret def purged(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.purge`` if necessary to purge the package. 
All configuration files are also removed. name The name of the package to be purged. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. 
code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.purged: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to purge. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='purge', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while purging ' 'package(s): {0}'.format(exc)) return ret def uptodate(name, refresh=False, pkgs=None, **kwargs): ''' .. versionadded:: 2014.7.0 .. versionchanged:: 2018.3.0 Added support for the ``pkgin`` provider. Verify that the system is completely up to date. name The name has no functional value and is only used as a tracking reference refresh refresh the package database before checking for new upgrades pkgs list of packages to upgrade :param str cache_valid_time: This parameter sets the value in seconds after which cache marked as invalid, and cache update is necessary. This overwrite ``refresh`` parameter default behavior. In this case cache_valid_time is set, refresh will not take place for amount in seconds since last ``apt-get update`` executed on the system. .. 
note:: This parameter available only on Debian based distributions, and have no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 kwargs Any keyword arguments to pass through to ``pkg.upgrade``. .. versionadded:: 2015.5.0 ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'Failed to update'} if 'pkg.list_upgrades' not in __salt__: ret['comment'] = 'State pkg.uptodate is not available' return ret # emerge --update doesn't appear to support repo notation if 'fromrepo' in kwargs and __grains__['os_family'] == 'Gentoo': ret['comment'] = '\'fromrepo\' argument not supported on this platform' return ret if isinstance(refresh, bool): pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) try: packages = __salt__['pkg.list_upgrades'](refresh=refresh, **kwargs) expected = {pkgname: {'new': pkgver, 'old': __salt__['pkg.version'](pkgname, **kwargs)} for pkgname, pkgver in six.iteritems(packages)} if isinstance(pkgs, list): packages = [pkg for pkg in packages if pkg in pkgs] expected = {pkgname: pkgver for pkgname, pkgver in six.iteritems(expected) if pkgname in pkgs} except Exception as exc: ret['comment'] = six.text_type(exc) return ret else: ret['comment'] = 'refresh must be either True or False' return ret if not packages: ret['comment'] = 'System is already up-to-date' ret['result'] = True return ret elif __opts__['test']: ret['comment'] = 'System update will be performed' ret['changes'] = expected ret['result'] = None return ret try: ret['changes'] = __salt__['pkg.upgrade'](refresh=refresh, pkgs=pkgs, **kwargs) except CommandExecutionError as exc: if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while updating ' 'packages: {0}'.format(exc)) return ret # If a package list was provided, ensure those packages were updated missing = [] if isinstance(pkgs, list): missing = [pkg for pkg in six.iterkeys(expected) if pkg not in ret['changes']] if missing: ret['comment'] = 'The following package(s) failed to update: {0}'.format(', '.join(missing)) ret['result'] = False else: ret['comment'] = 'Upgrade ran successfully' ret['result'] = True return ret def group_installed(name, skip=None, include=None, **kwargs): ''' .. versionadded:: 2015.8.0 .. versionchanged:: 2016.11.0 Added support in :mod:`pacman <salt.modules.pacman>` Ensure that an entire package group is installed. This state is currently only supported for the :mod:`yum <salt.modules.yumpkg>` and :mod:`pacman <salt.modules.pacman>` package managers. skip Packages that would normally be installed by the package group ("default" packages), which should not be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - skip: - piranha include Packages which are included in a group, which would not normally be installed by a ``yum groupinstall`` ("optional" packages). Note that this will not enforce group membership; if you include packages which are not members of the specified groups, they will still be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - include: - haproxy .. versionchanged:: 2016.3.0 This option can no longer be passed as a comma-separated list, it must now be passed as a list (as shown in the above example). .. note:: Because this is essentially a wrapper around :py:func:`pkg.install <salt.modules.yumpkg.install>`, any argument which can be passed to pkg.install may also be included here, and it will be passed on to the call to :py:func:`pkg.install <salt.modules.yumpkg.install>`. 
''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} if 'pkg.group_diff' not in __salt__: ret['comment'] = 'pkg.group_install not available for this platform' return ret if skip is None: skip = [] else: if not isinstance(skip, list): ret['comment'] = 'skip must be formatted as a list' return ret for idx, item in enumerate(skip): if not isinstance(item, six.string_types): skip[idx] = six.text_type(item) if include is None: include = [] else: if not isinstance(include, list): ret['comment'] = 'include must be formatted as a list' return ret for idx, item in enumerate(include): if not isinstance(item, six.string_types): include[idx] = six.text_type(item) try: diff = __salt__['pkg.group_diff'](name) except CommandExecutionError as err: ret['comment'] = ('An error was encountered while installing/updating ' 'group \'{0}\': {1}.'.format(name, err)) return ret mandatory = diff['mandatory']['installed'] + \ diff['mandatory']['not installed'] invalid_skip = [x for x in mandatory if x in skip] if invalid_skip: ret['comment'] = ( 'The following mandatory packages cannot be skipped: {0}' .format(', '.join(invalid_skip)) ) return ret targets = diff['mandatory']['not installed'] targets.extend([x for x in diff['default']['not installed'] if x not in skip]) targets.extend(include) if not targets: ret['result'] = True ret['comment'] = 'Group \'{0}\' is already installed'.format(name) return ret partially_installed = diff['mandatory']['installed'] \ or diff['default']['installed'] \ or diff['optional']['installed'] if __opts__['test']: ret['result'] = None if partially_installed: ret['comment'] = ( 'Group \'{0}\' is partially installed and will be updated' .format(name) ) else: ret['comment'] = 'Group \'{0}\' will be installed'.format(name) return ret try: ret['changes'] = __salt__['pkg.install'](pkgs=targets, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while ' 'installing/updating group \'{0}\': {1}' .format(name, exc)) return ret failed = [x for x in targets if x not in __salt__['pkg.list_pkgs'](**kwargs)] if failed: ret['comment'] = ( 'Failed to install the following packages: {0}' .format(', '.join(failed)) ) return ret ret['result'] = True ret['comment'] = 'Group \'{0}\' was {1}'.format( name, 'updated' if partially_installed else 'installed' ) return ret def mod_init(low): ''' Set a flag to tell the install functions to refresh the package database. This ensures that the package database is refreshed only once during a state run significantly improving the speed of package management during a state run. It sets a flag for a number of reasons, primarily due to timeline logic. When originally setting up the mod_init for pkg a number of corner cases arose with different package managers and how they refresh package data. It also runs the "ex_mod_init" from the package manager module that is currently loaded. The "ex_mod_init" is expected to work as a normal "mod_init" function. .. 
seealso:: :py:func:`salt.modules.ebuild.ex_mod_init` ''' ret = True if 'pkg.ex_mod_init' in __salt__: ret = __salt__['pkg.ex_mod_init'](low) if low['fun'] == 'installed' or low['fun'] == 'latest': salt.utils.pkg.write_rtag(__opts__) return ret return False def mod_aggregate(low, chunks, running): ''' The mod_aggregate function which looks up all packages in the available low chunks and merges them into a single pkgs ref in the present low data ''' pkgs = [] pkg_type = None agg_enabled = [ 'installed', 'latest', 'removed', 'purged', ] if low.get('fun') not in agg_enabled: return low for chunk in chunks: tag = __utils__['state.gen_tag'](chunk) if tag in running: # Already ran the pkg state, skip aggregation continue if chunk.get('state') == 'pkg': if '__agg__' in chunk: continue # Check for the same function if chunk.get('fun') != low.get('fun'): continue # Check for the same repo if chunk.get('fromrepo') != low.get('fromrepo'): continue # Check first if 'sources' was passed so we don't aggregate pkgs # and sources together. if 'sources' in chunk: if pkg_type is None: pkg_type = 'sources' if pkg_type == 'sources': pkgs.extend(chunk['sources']) chunk['__agg__'] = True else: if pkg_type is None: pkg_type = 'pkgs' if pkg_type == 'pkgs': # Pull out the pkg names! if 'pkgs' in chunk: pkgs.extend(chunk['pkgs']) chunk['__agg__'] = True elif 'name' in chunk: version = chunk.pop('version', None) if version is not None: pkgs.append({chunk['name']: version}) else: pkgs.append(chunk['name']) chunk['__agg__'] = True if pkg_type is not None and pkgs: if pkg_type in low: low[pkg_type].extend(pkgs) else: low[pkg_type] = pkgs return low def mod_watch(name, **kwargs): ''' Install/reinstall a package based on a watch requisite .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered. 
''' sfun = kwargs.pop('sfun', None) mapfun = {'purged': purged, 'latest': latest, 'removed': removed, 'installed': installed} if sfun in mapfun: return mapfun[sfun](name, **kwargs) return {'name': name, 'changes': {}, 'comment': 'pkg.{0} does not work with the watch requisite'.format(sfun), 'result': False}
saltstack/salt
salt/states/pkg.py
patch_installed
python
def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that packages related to certain advisory ids are installed.

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    name
        Advisory patch name used when ``advisory_ids`` is not given.

    advisory_ids
        A list of advisory ids to install. If an empty list is passed, the
        state succeeds immediately without doing anything.

    downloadonly
        Only download the patches instead of installing them. Passed through
        to ``pkg.install`` and used to word the state comments accordingly.

    CLI Example:

    .. code-block:: yaml

        issue-foo-fixed:
          pkg.patch_installed:
            - advisory_ids:
              - SUSE-SLE-SERVER-12-SP2-2017-185
              - SUSE-SLE-SERVER-12-SP2-2017-150
              - SUSE-SLE-SERVER-12-SP2-2017-120
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # Advisory patch support requires a provider that can list patches
    if 'pkg.list_patches' not in __salt__:
        ret['result'] = False
        ret['comment'] = 'The pkg.patch_installed state is not available on ' \
                         'this platform'
        return ret

    # An explicitly empty list means "nothing to do"; None falls through and
    # lets _find_advisory_targets use ``name`` as the single advisory id.
    if not advisory_ids and isinstance(advisory_ids, list):
        ret['result'] = True
        ret['comment'] = 'No advisory ids provided'
        return ret

    # Only target advisory patches which are not yet installed
    targets = _find_advisory_targets(name, advisory_ids, **kwargs)
    if isinstance(targets, dict) and 'result' in targets:
        # _find_advisory_targets returned a finished state dict (e.g. the
        # patch is already installed); pass it through unchanged.
        return targets
    elif not isinstance(targets, list):
        ret['result'] = False
        ret['comment'] = 'An error was encountered while checking targets: ' \
                         '{0}'.format(targets)
        return ret

    if __opts__['test']:
        summary = ', '.join(targets)
        # Word the comment to match what pkg.install will actually do
        # (previously this always said "downloaded", even when installing).
        action = 'downloaded' if downloadonly else 'installed'
        ret['comment'] = 'The following advisory patches would be ' \
                         '{0}: {1}'.format(action, summary)
        return ret

    try:
        pkg_ret = __salt__['pkg.install'](name=name,
                                          advisory_ids=advisory_ids,
                                          downloadonly=downloadonly,
                                          **kwargs)
        ret['result'] = True
        ret['changes'].update(pkg_ret)
    except CommandExecutionError as exc:
        ret = {'name': name, 'result': False}
        if exc.info:
            # Get information for state return from the exception.
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            # Match the verb to the requested operation instead of always
            # claiming a download failure.
            verb = 'downloading' if downloadonly else 'installing'
            ret['comment'] = ('An error was encountered while {0} '
                              'package(s): {1}'.format(verb, exc))
        return ret

    if not ret['changes'] and not ret['comment']:
        status = 'downloaded' if downloadonly else 'installed'
        ret['result'] = True
        ret['comment'] = ('Advisory patch is not needed or related packages '
                          'are already {0}'.format(status))

    return ret
.. versionadded:: 2017.7.0 Ensure that packages related to certain advisory ids are installed. Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` CLI Example: .. code-block:: yaml issue-foo-fixed: pkg.patch_installed: - advisory_ids: - SUSE-SLE-SERVER-12-SP2-2017-185 - SUSE-SLE-SERVER-12-SP2-2017-150 - SUSE-SLE-SERVER-12-SP2-2017-120
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pkg.py#L2184-L2261
[ "def _find_advisory_targets(name=None,\n advisory_ids=None,\n **kwargs):\n '''\n Inspect the arguments to pkg.patch_installed and discover what advisory\n patches need to be installed. Return a dict of advisory patches to install.\n '''\n cur_patches = __salt__['pkg.list_installed_patches'](**kwargs)\n if advisory_ids:\n to_download = advisory_ids\n else:\n to_download = [name]\n if cur_patches.get(name, {}):\n # Advisory patch already installed, no need to install it again\n return {'name': name,\n 'changes': {},\n 'result': True,\n 'comment': 'Advisory patch {0} is already '\n 'installed'.format(name)}\n\n # Find out which advisory patches will be targeted in the call to pkg.install\n targets = []\n for patch_name in to_download:\n cver = cur_patches.get(patch_name, {})\n # Advisory patch not yet installed, so add to targets\n if not cver:\n targets.append(patch_name)\n continue\n\n if not targets:\n # All specified packages are already downloaded\n msg = ('All specified advisory patches are already installed')\n return {'name': name,\n 'changes': {},\n 'result': True,\n 'comment': msg}\n\n return targets\n" ]
# -*- coding: utf-8 -*- ''' Installation of packages using OS package managers such as yum or apt-get ========================================================================= .. note:: On minions running systemd>=205, as of version 2015.8.12, 2016.3.3, and 2016.11.0, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing the package manager commands spawned by Salt, when Salt updates itself (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.use_scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Salt can manage software packages via the pkg state module, packages can be set up to be installed, latest, removed and purged. Package management declarations are typically rather simple: .. code-block:: yaml vim: pkg.installed A more involved example involves pulling from a custom repository. .. code-block:: yaml base: pkgrepo.managed: - humanname: Logstash PPA - name: ppa:wolfnet/logstash - dist: precise - file: /etc/apt/sources.list.d/logstash.list - keyid: 28B04E4A - keyserver: keyserver.ubuntu.com logstash: pkg.installed: - fromrepo: ppa:wolfnet/logstash Multiple packages can also be installed with the use of the pkgs state module .. code-block:: yaml dotdeb.repo: pkgrepo.managed: - humanname: Dotdeb - name: deb http://packages.dotdeb.org wheezy-php55 all - dist: wheezy-php55 - file: /etc/apt/sources.list.d/dotbeb.list - keyid: 89DF5277 - keyserver: keys.gnupg.net - refresh_db: true php.packages: pkg.installed: - fromrepo: wheezy-php55 - pkgs: - php5-fpm - php5-cli - php5-curl .. 
warning:: Package names are currently case-sensitive. If the minion is using a package manager which is not case-sensitive (such as :mod:`pkgng <salt.modules.pkgng>`), then this state will fail if the proper case is not used. This will be addressed in a future release of Salt. ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import fnmatch import logging import os import re # Import Salt libs import salt.utils.pkg import salt.utils.platform import salt.utils.versions from salt.output import nested from salt.utils.functools import namespaced_function as _namespaced_function from salt.utils.odict import OrderedDict as _OrderedDict from salt.exceptions import ( CommandExecutionError, MinionError, SaltInvocationError ) from salt.modules.pkg_resource import _repack_pkgs # Import 3rd-party libs from salt.ext import six # pylint: disable=invalid-name _repack_pkgs = _namespaced_function(_repack_pkgs, globals()) if salt.utils.platform.is_windows(): # pylint: disable=import-error,no-name-in-module,unused-import from salt.ext.six.moves.urllib.parse import urlparse as _urlparse from salt.exceptions import SaltRenderError import collections import datetime import errno import time from functools import cmp_to_key # pylint: disable=import-error # pylint: enable=unused-import from salt.modules.win_pkg import _get_package_info from salt.modules.win_pkg import get_repo_data from salt.modules.win_pkg import _get_repo_details from salt.modules.win_pkg import _refresh_db_conditional from salt.modules.win_pkg import refresh_db from salt.modules.win_pkg import genrepo from salt.modules.win_pkg import _repo_process_pkg_sls from salt.modules.win_pkg import _get_latest_pkg_version from salt.modules.win_pkg import _reverse_cmp_pkg_versions _get_package_info = _namespaced_function(_get_package_info, globals()) get_repo_data = _namespaced_function(get_repo_data, globals()) _get_repo_details = \ _namespaced_function(_get_repo_details, globals()) 
_refresh_db_conditional = \ _namespaced_function(_refresh_db_conditional, globals()) refresh_db = _namespaced_function(refresh_db, globals()) genrepo = _namespaced_function(genrepo, globals()) _repo_process_pkg_sls = \ _namespaced_function(_repo_process_pkg_sls, globals()) _get_latest_pkg_version = \ _namespaced_function(_get_latest_pkg_version, globals()) _reverse_cmp_pkg_versions = \ _namespaced_function(_reverse_cmp_pkg_versions, globals()) # The following imports are used by the namespaced win_pkg funcs # and need to be included in their globals. # pylint: disable=import-error,unused-import from salt.utils.versions import LooseVersion # pylint: enable=import-error,unused-import # pylint: enable=invalid-name log = logging.getLogger(__name__) def __virtual__(): ''' Only make these states available if a pkg provider has been detected or assigned for this minion ''' return 'pkg.install' in __salt__ def _get_comparison_spec(pkgver): ''' Return a tuple containing the comparison operator and the version. If no comparison operator was passed, the comparison is assumed to be an "equals" comparison, and "==" will be the operator returned. ''' oper, verstr = salt.utils.pkg.split_comparison(pkgver.strip()) if oper in ('=', ''): oper = '==' return oper, verstr def _parse_version_string(version_conditions_string): ''' Returns a list of two-tuples containing (operator, version). ''' result = [] version_conditions_string = version_conditions_string.strip() if not version_conditions_string: return result for version_condition in version_conditions_string.split(','): operator_and_version = _get_comparison_spec(version_condition) result.append(operator_and_version) return result def _fulfills_version_string(installed_versions, version_conditions_string, ignore_epoch=False, allow_updates=False): ''' Returns True if any of the installed versions match the specified version conditions, otherwise returns False. 
installed_versions The installed versions version_conditions_string The string containing all version conditions. E.G. 1.2.3-4 >=1.2.3-4 >=1.2.3-4, <2.3.4-5 >=1.2.3-4, <2.3.4-5, !=1.2.4-1 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. allow_updates : False Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. (Only applicable if only one strict version condition is specified E.G. version: 2.0.6~ubuntu3) ''' version_conditions = _parse_version_string(version_conditions_string) for installed_version in installed_versions: fullfills_all = True for operator, version_string in version_conditions: if allow_updates and len(version_conditions) == 1 and operator == '==': operator = '>=' fullfills_all = fullfills_all and _fulfills_version_spec([installed_version], operator, version_string, ignore_epoch=ignore_epoch) if fullfills_all: return True return False def _fulfills_version_spec(versions, oper, desired_version, ignore_epoch=False): ''' Returns True if any of the installed versions match the specified version, otherwise returns False ''' cmp_func = __salt__.get('pkg.version_cmp') # stripping "with_origin" dict wrapper if salt.utils.platform.is_freebsd(): if isinstance(versions, dict) and 'version' in versions: versions = versions['version'] for ver in versions: if (oper == '==' and fnmatch.fnmatch(ver, desired_version)) \ or salt.utils.versions.compare(ver1=ver, oper=oper, ver2=desired_version, cmp_func=cmp_func, ignore_epoch=ignore_epoch): return True return False def _find_unpurge_targets(desired, **kwargs): ''' Find packages which are marked to be purged but can't yet be removed because they are dependencies for 
other installed packages. These are the packages which will need to be 'unpurged' because they are part of pkg.installed states. This really just applies to Debian-based Linuxes. ''' return [ x for x in desired if x in __salt__['pkg.list_pkgs'](purge_desired=True, **kwargs) ] def _find_download_targets(name=None, version=None, pkgs=None, normalize=True, skip_suggestions=False, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.downloaded and discover what packages need to be downloaded. Return a dict of packages to download. ''' cur_pkgs = __salt__['pkg.list_downloaded'](**kwargs) if pkgs: to_download = _repack_pkgs(pkgs, normalize=normalize) if not to_download: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. See ' 'minion log.'} else: if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_download = {_normalize_name(name): version} else: to_download = {name: version} cver = cur_pkgs.get(name, {}) if name in to_download: # Package already downloaded, no need to download again if cver and version in cver: return {'name': name, 'changes': {}, 'result': True, 'comment': 'Version {0} of package \'{1}\' is already ' 'downloaded'.format(version, name)} # if cver is not an empty string, the package is already downloaded elif cver and version is None: # The package is downloaded return {'name': name, 'changes': {}, 'result': True, 'comment': 'Package {0} is already ' 'downloaded'.format(name)} version_spec = False if not skip_suggestions: try: problems = _preflight_check(to_download, **kwargs) except CommandExecutionError: pass else: comments = [] if problems.get('no_suggest'): comments.append( 'The following package(s) were not found, and no ' 'possible matches were found in the package db: ' '{0}'.format( ', '.join(sorted(problems['no_suggest'])) ) ) if problems.get('suggest'): for pkgname, suggestions in \ six.iteritems(problems['suggest']): 
comments.append( 'Package \'{0}\' not found (possible matches: ' '{1})'.format(pkgname, ', '.join(suggestions)) ) if comments: if len(comments) > 1: comments.append('') return {'name': name, 'changes': {}, 'result': False, 'comment': '. '.join(comments).rstrip()} # Find out which packages will be targeted in the call to pkg.download # Check current downloaded versions against specified versions targets = {} problems = [] for pkgname, pkgver in six.iteritems(to_download): cver = cur_pkgs.get(pkgname, {}) # Package not yet downloaded, so add to targets if not cver: targets[pkgname] = pkgver continue # No version specified but package is already downloaded elif cver and not pkgver: continue version_spec = True try: if not _fulfills_version_string(cver.keys(), pkgver, ignore_epoch=ignore_epoch): targets[pkgname] = pkgver except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already downloaded msg = ( 'All specified packages{0} are already downloaded' .format(' (matching specified versions)' if version_spec else '') ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_advisory_targets(name=None, advisory_ids=None, **kwargs): ''' Inspect the arguments to pkg.patch_installed and discover what advisory patches need to be installed. Return a dict of advisory patches to install. 
''' cur_patches = __salt__['pkg.list_installed_patches'](**kwargs) if advisory_ids: to_download = advisory_ids else: to_download = [name] if cur_patches.get(name, {}): # Advisory patch already installed, no need to install it again return {'name': name, 'changes': {}, 'result': True, 'comment': 'Advisory patch {0} is already ' 'installed'.format(name)} # Find out which advisory patches will be targeted in the call to pkg.install targets = [] for patch_name in to_download: cver = cur_patches.get(patch_name, {}) # Advisory patch not yet installed, so add to targets if not cver: targets.append(patch_name) continue if not targets: # All specified packages are already downloaded msg = ('All specified advisory patches are already installed') return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_remove_targets(name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.removed and discover what packages need to be removed. Return a dict of packages to remove. ''' if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if pkgs: to_remove = _repack_pkgs(pkgs, normalize=normalize) if not to_remove: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. 
See ' 'minion log.'} else: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_remove = {_normalize_name(name): version} version_spec = False # Find out which packages will be targeted in the call to pkg.remove # Check current versions against specified versions targets = [] problems = [] for pkgname, pkgver in six.iteritems(to_remove): # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names origin = bool(re.search('/', pkgname)) if __grains__['os'] == 'FreeBSD' and origin: cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == pkgname] else: cver = cur_pkgs.get(pkgname, []) # Package not installed, no need to remove if not cver: continue # No version specified and pkg is installed elif __salt__['pkg_resource.version_clean'](pkgver) is None: targets.append(pkgname) continue version_spec = True try: if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch): targets.append(pkgname) else: log.debug( 'Current version (%s) did not match desired version ' 'specification (%s), will not remove', cver, pkgver ) except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already absent msg = 'All specified packages{0} are already absent'.format( ' (matching specified versions)' if version_spec else '' ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_install_targets(name=None, version=None, pkgs=None, sources=None, skip_suggestions=False, pkg_verify=False, normalize=True, ignore_epoch=False, reinstall=False, refresh=False, **kwargs): ''' Inspect the arguments to pkg.installed and discover what packages need to be installed. 
Return a dict of desired packages ''' was_refreshed = False if all((pkgs, sources)): return {'name': name, 'changes': {}, 'result': False, 'comment': 'Only one of "pkgs" and "sources" is permitted.'} # dict for packages that fail pkg.verify and their altered files altered_files = {} # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True if salt.utils.platform.is_windows(): # Windows requires a refresh to establish a pkg db if refresh=True, so # add it to the kwargs. 
kwargs['refresh'] = refresh resolve_capabilities = kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__ try: cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) cur_prov = resolve_capabilities and __salt__['pkg.list_provides'](**kwargs) or dict() except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': exc.strerror} if salt.utils.platform.is_windows() and kwargs.pop('refresh', False): # We already refreshed when we called pkg.list_pkgs was_refreshed = True refresh = False if any((pkgs, sources)): if pkgs: desired = _repack_pkgs(pkgs, normalize=normalize) elif sources: desired = __salt__['pkg_resource.pack_sources']( sources, normalize=normalize, ) if not desired: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted \'{0}\' parameter. See ' 'minion log.'.format('pkgs' if pkgs else 'sources')} to_unpurge = _find_unpurge_targets(desired, **kwargs) else: if salt.utils.platform.is_windows(): pkginfo = _get_package_info(name, saltenv=kwargs['saltenv']) if not pkginfo: return {'name': name, 'changes': {}, 'result': False, 'comment': 'Package {0} not found in the ' 'repository.'.format(name)} if version is None: version = _get_latest_pkg_version(pkginfo) if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) desired = {_normalize_name(name): version} else: desired = {name: version} to_unpurge = _find_unpurge_targets(desired, **kwargs) # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names origin = bool(re.search('/', name)) if __grains__['os'] == 'FreeBSD' and origin: cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == name] else: cver = cur_pkgs.get(name, []) if name not in to_unpurge: if version and version in cver \ and not reinstall \ and not pkg_verify: # The package is installed and is the correct version return {'name': name, 'changes': {}, 'result': True, 
'comment': 'Version {0} of package \'{1}\' is already ' 'installed'.format(version, name)} # if cver is not an empty string, the package is already installed elif cver and version is None \ and not reinstall \ and not pkg_verify: # The package is installed return {'name': name, 'changes': {}, 'result': True, 'comment': 'Package {0} is already ' 'installed'.format(name)} version_spec = False if not sources: # Check for alternate package names if strict processing is not # enforced. Takes extra time. Disable for improved performance if not skip_suggestions: # Perform platform-specific pre-flight checks not_installed = dict([ (name, version) for name, version in desired.items() if not (name in cur_pkgs and (version is None or _fulfills_version_string(cur_pkgs[name], version))) ]) if not_installed: try: problems = _preflight_check(not_installed, **kwargs) except CommandExecutionError: pass else: comments = [] if problems.get('no_suggest'): comments.append( 'The following package(s) were not found, and no ' 'possible matches were found in the package db: ' '{0}'.format( ', '.join(sorted(problems['no_suggest'])) ) ) if problems.get('suggest'): for pkgname, suggestions in \ six.iteritems(problems['suggest']): comments.append( 'Package \'{0}\' not found (possible matches: ' '{1})'.format(pkgname, ', '.join(suggestions)) ) if comments: if len(comments) > 1: comments.append('') return {'name': name, 'changes': {}, 'result': False, 'comment': '. '.join(comments).rstrip()} # Resolve the latest package version for any packages with "latest" in the # package version wants_latest = [] \ if sources \ else [x for x, y in six.iteritems(desired) if y == 'latest'] if wants_latest: resolved_latest = __salt__['pkg.latest_version'](*wants_latest, refresh=refresh, **kwargs) if len(wants_latest) == 1: resolved_latest = {wants_latest[0]: resolved_latest} if refresh: was_refreshed = True refresh = False # pkg.latest_version returns an empty string when the package is # up-to-date. 
So check the currently-installed packages. If found, the # resolved latest version will be the currently installed one from # cur_pkgs. If not found, then the package doesn't exist and the # resolved latest version will be None. for key in resolved_latest: if not resolved_latest[key]: if key in cur_pkgs: resolved_latest[key] = cur_pkgs[key][-1] else: resolved_latest[key] = None # Update the desired versions with the ones we resolved desired.update(resolved_latest) # Find out which packages will be targeted in the call to pkg.install targets = {} to_reinstall = {} problems = [] warnings = [] failed_verify = False for package_name, version_string in six.iteritems(desired): cver = cur_pkgs.get(package_name, []) if resolve_capabilities and not cver and package_name in cur_prov: cver = cur_pkgs.get(cur_prov.get(package_name)[0], []) # Package not yet installed, so add to targets if not cver: targets[package_name] = version_string continue if sources: if reinstall: to_reinstall[package_name] = version_string continue elif 'lowpkg.bin_pkg_info' not in __salt__: continue # Metadata parser is available, cache the file and derive the # package's name and version err = 'Unable to cache {0}: {1}' try: cached_path = __salt__['cp.cache_file'](version_string, saltenv=kwargs['saltenv']) except CommandExecutionError as exc: problems.append(err.format(version_string, exc)) continue if not cached_path: problems.append(err.format(version_string, 'file not found')) continue elif not os.path.exists(cached_path): problems.append('{0} does not exist on minion'.format(version_string)) continue source_info = __salt__['lowpkg.bin_pkg_info'](cached_path) if source_info is None: warnings.append('Failed to parse metadata for {0}'.format(version_string)) continue else: verstr = source_info['version'] else: verstr = version_string if reinstall: to_reinstall[package_name] = version_string continue if not __salt__['pkg_resource.check_extra_requirements'](package_name, version_string): 
targets[package_name] = version_string continue # No version specified and pkg is installed elif __salt__['pkg_resource.version_clean'](version_string) is None: if (not reinstall) and pkg_verify: try: verify_result = __salt__['pkg.verify']( package_name, ignore_types=ignore_types, verify_options=verify_options, **kwargs ) except (CommandExecutionError, SaltInvocationError) as exc: failed_verify = exc.strerror continue if verify_result: to_reinstall[package_name] = version_string altered_files[package_name] = verify_result continue version_fulfilled = False allow_updates = bool(not sources and kwargs.get('allow_updates')) try: version_fulfilled = _fulfills_version_string(cver, verstr, ignore_epoch=ignore_epoch, allow_updates=allow_updates) except CommandExecutionError as exc: problems.append(exc.strerror) continue # Compare desired version against installed version. version_spec = True if not version_fulfilled: if reinstall: to_reinstall[package_name] = version_string else: version_conditions = _parse_version_string(version_string) if pkg_verify and any(oper == '==' for oper, version in version_conditions): try: verify_result = __salt__['pkg.verify']( package_name, ignore_types=ignore_types, verify_options=verify_options, **kwargs ) except (CommandExecutionError, SaltInvocationError) as exc: failed_verify = exc.strerror continue if verify_result: to_reinstall[package_name] = version_string altered_files[package_name] = verify_result else: log.debug( 'Current version (%s) did not match desired version ' 'specification (%s), adding to installation targets', cver, version_string ) targets[package_name] = version_string if failed_verify: problems.append(failed_verify) if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not any((targets, to_unpurge, to_reinstall)): # All specified packages are installed msg = 'All specified packages are already installed{0}' msg = msg.format( ' and are at the desired version' if 
version_spec and not sources else '' ) ret = {'name': name, 'changes': {}, 'result': True, 'comment': msg} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret return (desired, targets, to_unpurge, to_reinstall, altered_files, warnings, was_refreshed) def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None): ''' Determine whether or not the installed packages match what was requested in the SLS file. ''' ok = [] failed = [] if not new_caps: new_caps = dict() for pkgname, pkgver in desired.items(): # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names. # Homebrew for Mac OSX does something similar with tap names # prefixing package names, separated with a slash. has_origin = '/' in pkgname if __grains__['os'] == 'FreeBSD' and has_origin: cver = [k for k, v in six.iteritems(new_pkgs) if v['origin'] == pkgname] elif __grains__['os'] == 'MacOS' and has_origin: cver = new_pkgs.get(pkgname, new_pkgs.get(pkgname.split('/')[-1])) elif __grains__['os'] == 'OpenBSD': cver = new_pkgs.get(pkgname.split('%')[0]) elif __grains__['os_family'] == 'Debian': cver = new_pkgs.get(pkgname.split('=')[0]) else: cver = new_pkgs.get(pkgname) if not cver and pkgname in new_caps: cver = new_pkgs.get(new_caps.get(pkgname)[0]) if not cver: failed.append(pkgname) continue elif pkgver == 'latest': ok.append(pkgname) continue elif not __salt__['pkg_resource.version_clean'](pkgver): ok.append(pkgname) continue elif pkgver.endswith("*") and cver[0].startswith(pkgver[:-1]): ok.append(pkgname) continue if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch): ok.append(pkgname) else: failed.append(pkgname) return ok, failed def _get_desired_pkg(name, desired): ''' Helper function that retrieves and nicely formats the desired pkg (and version if specified) so that helpful information can be printed in the comment for the state. 
''' if not desired[name] or desired[name].startswith(('<', '>', '=')): oper = '' else: oper = '=' return '{0}{1}{2}'.format(name, oper, '' if not desired[name] else desired[name]) def _preflight_check(desired, fromrepo, **kwargs): ''' Perform platform-specific checks on desired packages ''' if 'pkg.check_db' not in __salt__: return {} ret = {'suggest': {}, 'no_suggest': []} pkginfo = __salt__['pkg.check_db']( *list(desired.keys()), fromrepo=fromrepo, **kwargs ) for pkgname in pkginfo: if pkginfo[pkgname]['found'] is False: if pkginfo[pkgname]['suggestions']: ret['suggest'][pkgname] = pkginfo[pkgname]['suggestions'] else: ret['no_suggest'].append(pkgname) return ret def _nested_output(obj): ''' Serialize obj and format for output ''' nested.__opts__ = __opts__ ret = nested.output(obj).rstrip() return ret def _resolve_capabilities(pkgs, refresh=False, **kwargs): ''' Resolve capabilities in ``pkgs`` and exchange them with real package names, when the result is distinct. This feature can be turned on while setting the paramter ``resolve_capabilities`` to True. Return the input dictionary with replaced capability names and as second return value a bool which say if a refresh need to be run. In case of ``resolve_capabilities`` is False (disabled) or not supported by the implementation the input is returned unchanged. ''' if not pkgs or 'pkg.resolve_capabilities' not in __salt__: return pkgs, refresh ret = __salt__['pkg.resolve_capabilities'](pkgs, refresh=refresh, **kwargs) return ret, False def installed( name, version=None, refresh=None, fromrepo=None, skip_verify=False, skip_suggestions=False, pkgs=None, sources=None, allow_updates=False, pkg_verify=False, normalize=True, ignore_epoch=False, reinstall=False, update_holds=False, bypass_file=None, bypass_file_contains=None, **kwargs): ''' Ensure that the package is installed, and that it is the correct version (if specified). :param str name: The name of the package to be installed. 
This parameter is ignored if either "pkgs" or "sources" is used. Additionally, please note that this option can only be used to install packages from a software repository. To install a package file manually, use the "sources" option detailed below. :param str version: Install a specific version of a package. This option is ignored if "sources" is used. Currently, this option is supported for the following pkg providers: :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`pkgin <salt.modules.pkgin>`, :mod:`win_pkg <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`. The version number includes the release designation where applicable, to allow Salt to target a specific release of a given version. When in doubt, using the ``pkg.latest_version`` function for an uninstalled package will tell you the version available. .. code-block:: bash # salt myminion pkg.latest_version vim-enhanced myminion: 2:7.4.160-1.el7 .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the ``pkg.latest_version`` output above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. 
Also, while this function is not yet implemented for all pkg frontends, :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` will show all versions available in the various repositories for a given package, irrespective of whether or not it is installed. .. code-block:: bash # salt myminion pkg.list_repo_pkgs bash myminion: ---------- bash: - 4.2.46-21.el7_3 - 4.2.46-20.el7_2 This function was first added for :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` in 2014.1.0, and was expanded to :py:func:`Debian/Ubuntu <salt.modules.aptpkg.list_repo_pkgs>` and :py:func:`Arch Linux <salt.modules.pacman.list_repo_pkgs>`-based distros in the 2017.7.0 release. The version strings returned by either of these functions can be used as version specifiers in pkg states. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.installed: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 If the version given is the string ``latest``, the latest available package version will be installed à la ``pkg.latest``. **WILDCARD VERSIONS** As of the 2017.7.0 release, this state now supports wildcards in package versions for SUSE SLES/Leap/Tumbleweed, Debian/Ubuntu, RHEL/CentOS, Arch Linux, and their derivatives. Using wildcards can be useful for packages where the release name is built into the version in some way, such as for RHEL/CentOS which typically has version numbers like ``1.2.34-5.el7``. An example of the usage for this would be: .. code-block:: yaml mypkg: pkg.installed: - version: '1.2.34*' Keep in mind that using wildcard versions will result in a slower state run since Salt must gather the available versions of the specified packages and figure out which of them match the specified wildcard expression. :param bool refresh: This parameter controls whether or not the package repo database is updated prior to installing the requested package(s). 
If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before installing. If ``False``, the package database will *not* be refreshed before installing. If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - cache_valid_time: 300 - allow_updates: True - hold: False In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param str fromrepo: Specify a repository from which to install .. note:: Distros which use APT (Debian, Ubuntu, etc.) do not have a concept of repositories, in the same way as YUM-based distros do. When a source is added, it is assigned to a given release. Consider the following source configuration: .. code-block:: text deb http://ppa.launchpad.net/saltstack/salt/ubuntu precise main The packages provided by this source would be made available via the ``precise`` release, therefore ``fromrepo`` would need to be set to ``precise`` for Salt to install the package from this source. 
Having multiple sources in the same release may result in the default install candidate being newer than what is desired. If this is the case, the desired version must be specified using the ``version`` parameter. If the ``pkgs`` parameter is being used to install multiple packages in the same state, then instead of using ``version``, use the method of version specification described in the **Multiple Package Installation Options** section below. Running the shell command ``apt-cache policy pkgname`` on a minion can help elucidate the APT configuration and aid in properly configuring states: .. code-block:: bash root@saltmaster:~# salt ubuntu01 cmd.run 'apt-cache policy ffmpeg' ubuntu01: ffmpeg: Installed: (none) Candidate: 7:0.10.11-1~precise1 Version table: 7:0.10.11-1~precise1 0 500 http://ppa.launchpad.net/jon-severinsson/ffmpeg/ubuntu/ precise/main amd64 Packages 4:0.8.10-0ubuntu0.12.04.1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise-updates/main amd64 Packages 500 http://security.ubuntu.com/ubuntu/ precise-security/main amd64 Packages 4:0.8.1-0ubuntu1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise/main amd64 Packages The release is located directly after the source's URL. The actual release name is the part before the slash, so to install version **4:0.8.10-0ubuntu0.12.04.1** either ``precise-updates`` or ``precise-security`` could be used for the ``fromrepo`` value. :param bool skip_verify: Skip the GPG verification check for the package to be installed :param bool skip_suggestions: Force strict package naming. Disables lookup of package alternatives. .. versionadded:: 2014.1.1 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 :param bool allow_updates: Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). 
This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - allow_updates: True - hold: False :param bool pkg_verify: .. versionadded:: 2014.7.0 For requested packages that are already installed and would not be targeted for upgrade or downgrade, use pkg.verify to determine if any of the files installed by the package have been altered. If files have been altered, the reinstall option of pkg.install is used to force a reinstall. Types to ignore can be passed to pkg.verify. Additionally, ``verify_options`` can be used to modify further the behavior of pkg.verify. See examples below. Currently, this option is supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>`. Examples: .. code-block:: yaml httpd: pkg.installed: - version: 2.2.15-30.el6.centos - pkg_verify: True .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc - verify_options: - nodeps - nofiledigest :param list ignore_types: List of types to ignore when verifying the package .. versionadded:: 2014.7.0 :param list verify_options: List of additional options to pass when verifying the package. These options will be added to the ``rpm -V`` command, prepended with ``--`` (for example, when ``nodeps`` is passed in this option, ``rpm -V`` will be run with ``--nodeps``). .. versionadded:: 2016.11.0 :param bool normalize: Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml gpfs.gplbin-2.6.32-279.31.1.el6.x86_64: pkg.installed: - normalize: False :param bool ignore_epoch: When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.installed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the package would be installed, but the state would report as failed because the actual installed version would be ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 | **MULTIPLE PACKAGE INSTALLATION OPTIONS: (not supported in pkgng)** :param list pkgs: A list of packages to install from a software repository. All packages listed under ``pkgs`` will be installed via a single command. .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar - baz - hold: True ``NOTE:`` For :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`winrepo <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`, version numbers can be specified in the ``pkgs`` argument. For example: .. 
code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz Additionally, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`zypper <salt.modules.zypper>`, :mod:`yum/dnf <salt.modules.yumpkg>`, and :mod:`apt <salt.modules.aptpkg>` support the ``<``, ``<=``, ``>=``, and ``>`` operators for more control over what versions will be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: '>=1.2.3-4' - baz ``NOTE:`` When using comparison operators, the expression must be enclosed in quotes to avoid a YAML render error. With :mod:`ebuild <salt.modules.ebuild>` is also possible to specify a use flag list and/or if the given packages should be in package.accept_keywords file and/or the overlay from which you want the package to be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo: '~' - bar: '~>=1.2:slot::overlay[use,-otheruse]' - baz :param list sources: A list of packages to install, along with the source URI or local path from which to install each package. In the example below, ``foo``, ``bar``, ``baz``, etc. refer to the name of the package, as it would appear in the output of the ``pkg.version`` or ``pkg.list_pkgs`` salt CLI commands. .. code-block:: yaml mypkgs: pkg.installed: - sources: - foo: salt://rpms/foo.rpm - bar: http://somesite.org/bar.rpm - baz: ftp://someothersite.org/baz.rpm - qux: /minion/path/to/qux.rpm **PLATFORM-SPECIFIC ARGUMENTS** These are specific to each OS. If it does not apply to the execution module for your OS, it is ignored. :param bool hold: Force the package to be held at the current installed version. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2014.7.0 :param bool update_holds: If ``True``, and this function would update the package version, any packages which are being held will be temporarily unheld so that they can be updated. 
Otherwise, if this function attempts to update a held package, the held package(s) will be skipped and the state will fail. By default, this parameter is set to ``False``. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2016.11.0 :param list names: A list of packages to install from a software repository. Each package will be installed individually by the package manager. .. warning:: Unlike ``pkgs``, the ``names`` parameter cannot specify a version. In addition, it makes a separate call to the package management frontend to install each package, whereas ``pkgs`` makes just a single call. It is therefore recommended to use ``pkgs`` instead of ``names`` to install multiple packages, both for the additional features and the performance improvement that it brings. :param bool install_recommends: Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - install_recommends: False :param bool only_upgrade: Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. :param bool report_reboot_exit_codes: If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. 
    code-block:: yaml

        ms vcpp installed:
          pkg.installed:
            - name: ms-vcpp
            - version: 10.0.40219
            - report_reboot_exit_codes: False

    :param str bypass_file:
        If you wish to bypass the full package validation process, you can
        specify a file related to the installed package as a way to validate
        the package has already been installed. A good example would be a
        config file that is deployed with the package. Another bypass_file
        could be ``/run/salt-minion.pid``.

        .. code-block:: yaml

            install_ntp:
              pkg.installed:
                - name: ntp
                - bypass_file: /etc/ntp.conf

        The use case for this feature is when running salt at significant
        scale. Each state that has a requisite for a ``pkg.installed`` will
        have salt querying the package manager of the system. Compared to
        simple diff checks, querying the package manager is a lengthy
        process. This feature is an attempt to reduce the run time of
        states. If only a config change is being made but you wish to keep
        all of the self resolving requisites this bypasses the lengthy cost
        of the package manager. The assumption is that if this file is
        present, the package should already be installed.

    :param str bypass_file_contains:
        This option can only be used in conjunction with the ``bypass_file``
        option. It is to provide a second layer of validation before
        bypassing the ``pkg.installed`` process.

        .. code-block:: yaml

            install_ntp:
              pkg.installed:
                - name: ntp
                - bypass_file: /etc/ntp.conf
                - bypass_file_contains: version-20181218

        This will have salt check to see if the file contains the specified
        string. If the value is found, the ``pkg.installed`` process will be
        bypassed under the assumption that two pieces of validation have
        passed and the package is already installed.

        .. warning::
            Do not try and use ``{{ salt['pkg.version']('ntp') }}`` in a
            jinja template as part of your bypass_file_contains match. This
            will trigger a ``pkg.version`` lookup with the package manager
            and negate any time saved by trying to use the bypass feature.
    :return: A dictionary containing the state of the software installation
    :rtype dict:

    .. note::
        The ``pkg.installed`` state supports the usage of ``reload_modules``.
        This functionality allows you to force Salt to reload all modules. In
        many cases, Salt is clever enough to transparently reload the
        modules. For example, if you install a package, Salt reloads modules
        because some other module or state might require the package which
        was installed. However, there are some edge cases where this may not
        be the case, which is what ``reload_modules`` is meant to resolve.
        You should only use ``reload_modules`` if your ``pkg.installed`` does
        some sort of installation where if you do not reload the modules
        future items in your state which rely on the software being installed
        will fail. Please see the :ref:`Reloading Modules <reloading-modules>`
        documentation for more information.
    '''
    # An explicitly empty pkgs list means there is nothing to do.
    if not pkgs and isinstance(pkgs, list):
        return {'name': name,
                'changes': {},
                'result': True,
                'comment': 'No packages to install provided'}

    # If just a name (and optionally a version) is passed, just pack them into
    # the pkgs argument.
    if name and not any((pkgs, sources)):
        if version:
            pkgs = [{name: version}]
            version = None
        else:
            pkgs = [name]

    kwargs['saltenv'] = __env__
    refresh = salt.utils.pkg.check_refresh(__opts__, refresh)

    # Fast-path bypass: if a bypass file (and optionally a required string
    # inside it) is present, skip the package-manager query entirely and
    # report success. See the bypass_file/bypass_file_contains docs above.
    if bypass_file is not None and bypass_file_contains is not None:
        if os.path.isfile(bypass_file):
            with salt.utils.fopen(bypass_file) as bypass_file_open:
                if bypass_file_contains in bypass_file_open.read():
                    return {'name': name,
                            'changes': {},
                            'result': True,
                            'comment': 'pkg.installed was bypassed as {} was present in {}'.format(bypass_file_contains, bypass_file)}

    if bypass_file is not None and bypass_file_contains is None:
        if os.path.isfile(bypass_file):
            return {'name': name,
                    'changes': {},
                    'result': True,
                    'comment': 'pkg.installed was bypassed as bypass_file {} was present'.format(bypass_file)}

    # check if capabilities should be checked and modify the requested packages
    # accordingly.
    if pkgs:
        pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs)

    # pkg_verify may be a bool or a list of option dicts; normalize a
    # non-list value to a strict bool.
    if not isinstance(pkg_verify, list):
        pkg_verify = pkg_verify is True
    if (pkg_verify or isinstance(pkg_verify, list)) \
            and 'pkg.verify' not in __salt__:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'pkg.verify not implemented'}

    if not isinstance(version, six.string_types) and version is not None:
        version = six.text_type(version)

    kwargs['allow_updates'] = allow_updates

    result = _find_install_targets(name, version, pkgs, sources,
                                   fromrepo=fromrepo,
                                   skip_suggestions=skip_suggestions,
                                   pkg_verify=pkg_verify,
                                   normalize=normalize,
                                   ignore_epoch=ignore_epoch,
                                   reinstall=reinstall,
                                   refresh=refresh,
                                   **kwargs)

    try:
        # On success _find_install_targets returns a 7-tuple; anything else
        # (an error/result dict) raises ValueError on unpacking and is
        # returned as-is below.
        (desired, targets, to_unpurge, to_reinstall,
         altered_files, warnings, was_refreshed) = result
        if was_refreshed:
            refresh = False
    except ValueError:
        # _find_install_targets() found no targets or encountered an error

        # check that the hold function is available
        if 'pkg.hold' in __salt__ and 'hold' in kwargs:
            try:
                action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold'
                hold_ret = __salt__[action](
                    name=name, pkgs=pkgs, sources=sources
                )
            except (CommandExecutionError, SaltInvocationError) as exc:
                return {'name': name,
                        'changes': {},
                        'result': False,
                        'comment': six.text_type(exc)}

            if 'result' in hold_ret and not hold_ret['result']:
                return {'name': name,
                        'changes': {},
                        'result': False,
                        'comment': 'An error was encountered while '
                                   'holding/unholding package(s): {0}'
                                   .format(hold_ret['comment'])}
            else:
                # Partition the per-package hold results and fold them into
                # the result dict returned by _find_install_targets.
                modified_hold = [hold_ret[x] for x in hold_ret
                                 if hold_ret[x]['changes']]
                not_modified_hold = [hold_ret[x] for x in hold_ret
                                     if not hold_ret[x]['changes']
                                     and hold_ret[x]['result']]
                failed_hold = [hold_ret[x] for x in hold_ret
                               if not hold_ret[x]['result']]

                for i in modified_hold:
                    result['comment'] += '.\n{0}'.format(i['comment'])
                    result['result'] = i['result']
                    result['changes'][i['name']] = i['changes']

                for i in not_modified_hold:
                    result['comment'] += '.\n{0}'.format(i['comment'])
                    result['result'] = i['result']

                for i in failed_hold:
                    result['comment'] += '.\n{0}'.format(i['comment'])
                    result['result'] = i['result']
        return result

    # Unpurging requires lowpkg.unpurge (dpkg-based platforms).
    if to_unpurge and 'lowpkg.unpurge' not in __salt__:
        ret = {'name': name,
               'changes': {},
               'result': False,
               'comment': 'lowpkg.unpurge not implemented'}
        if warnings:
            ret.setdefault('warnings', []).extend(warnings)
        return ret

    # Remove any targets not returned by _find_install_targets
    if pkgs:
        pkgs = [dict([(x, y)]) for x, y in six.iteritems(targets)]
        pkgs.extend([dict([(x, y)]) for x, y in six.iteritems(to_reinstall)])
    elif sources:
        oldsources = sources
        sources = [x for x in oldsources
                   if next(iter(list(x.keys()))) in targets]
        sources.extend([x for x in oldsources
                        if next(iter(list(x.keys()))) in to_reinstall])

    comment = []
    # test=True: report what would change without invoking pkg.install.
    if __opts__['test']:
        if targets:
            if sources:
                summary = ', '.join(targets)
            else:
                summary = ', '.join([_get_desired_pkg(x, targets)
                                     for x in targets])
            comment.append('The following packages would be '
                           'installed/updated: {0}'.format(summary))
        if to_unpurge:
            comment.append(
                'The following packages would have their selection status '
                'changed from \'purge\' to \'install\': {0}'
                .format(', '.join(to_unpurge))
            )
        if to_reinstall:
            # Add a comment for each package in to_reinstall with its
            # pkg.verify output
            if reinstall:
                reinstall_targets = []
                for reinstall_pkg in to_reinstall:
                    if sources:
                        reinstall_targets.append(reinstall_pkg)
                    else:
                        reinstall_targets.append(
                            _get_desired_pkg(reinstall_pkg, to_reinstall)
                        )
                msg = 'The following packages would be reinstalled: '
                msg += ', '.join(reinstall_targets)
                comment.append(msg)
            else:
                for reinstall_pkg in to_reinstall:
                    if sources:
                        pkgstr = reinstall_pkg
                    else:
                        pkgstr = _get_desired_pkg(reinstall_pkg, to_reinstall)
                    comment.append(
                        'Package \'{0}\' would be reinstalled because the '
                        'following files have been altered:'.format(pkgstr)
                    )
                    comment.append(
                        _nested_output(altered_files[reinstall_pkg])
                    )
        ret = {'name': name,
               'changes': {},
               'result': None,
               'comment': '\n'.join(comment)}
        if warnings:
            ret.setdefault('warnings', []).extend(warnings)
        return ret

    changes = {'installed': {}}
    modified_hold = None
    not_modified_hold = None
    failed_hold = None
    if targets or to_reinstall:
        # All installs/reinstalls happen in a single pkg.install call.
        try:
            pkg_ret = __salt__['pkg.install'](name=None,
                                              refresh=refresh,
                                              version=version,
                                              fromrepo=fromrepo,
                                              skip_verify=skip_verify,
                                              pkgs=pkgs,
                                              sources=sources,
                                              reinstall=bool(to_reinstall),
                                              normalize=normalize,
                                              update_holds=update_holds,
                                              ignore_epoch=ignore_epoch,
                                              **kwargs)
        except CommandExecutionError as exc:
            ret = {'name': name, 'result': False}
            if exc.info:
                # Get information for state return from the exception.
                ret['changes'] = exc.info.get('changes', {})
                ret['comment'] = exc.strerror_without_changes
            else:
                ret['changes'] = {}
                ret['comment'] = ('An error was encountered while installing '
                                  'package(s): {0}'.format(exc))
            if warnings:
                ret.setdefault('warnings', []).extend(warnings)
            return ret

        if refresh:
            refresh = False

        if isinstance(pkg_ret, dict):
            changes['installed'].update(pkg_ret)
        elif isinstance(pkg_ret, six.string_types):
            comment.append(pkg_ret)
            # Code below will be looking for a dictionary. If this is a string
            # it means that there was an exception raised and that no packages
            # changed, so now that we have added this error to the comments we
            # set this to an empty dictionary so that the code below which
            # checks reinstall targets works.
            pkg_ret = {}

    # Apply the requested hold/unhold to all desired packages.
    if 'pkg.hold' in __salt__ and 'hold' in kwargs:
        try:
            action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold'
            hold_ret = __salt__[action](
                name=name, pkgs=desired
            )
        except (CommandExecutionError, SaltInvocationError) as exc:
            comment.append(six.text_type(exc))
            ret = {'name': name,
                   'changes': changes,
                   'result': False,
                   'comment': '\n'.join(comment)}
            if warnings:
                ret.setdefault('warnings', []).extend(warnings)
            return ret
        else:
            if 'result' in hold_ret and not hold_ret['result']:
                ret = {'name': name,
                       'changes': {},
                       'result': False,
                       'comment': 'An error was encountered while '
                                  'holding/unholding package(s): {0}'
                                  .format(hold_ret['comment'])}
                if warnings:
                    ret.setdefault('warnings', []).extend(warnings)
                return ret
            else:
                modified_hold = [hold_ret[x] for x in hold_ret
                                 if hold_ret[x]['changes']]
                not_modified_hold = [hold_ret[x] for x in hold_ret
                                     if not hold_ret[x]['changes']
                                     and hold_ret[x]['result']]
                failed_hold = [hold_ret[x] for x in hold_ret
                               if not hold_ret[x]['result']]

    if to_unpurge:
        changes['purge_desired'] = __salt__['lowpkg.unpurge'](*to_unpurge)

    # Analyze pkg.install results for packages in targets
    if sources:
        modified = [x for x in changes['installed'] if x in targets]
        not_modified = [x for x in desired
                        if x not in targets
                        and x not in to_reinstall]
        failed = [x for x in targets if x not in modified]
    else:
        if __grains__['os'] == 'FreeBSD':
            kwargs['with_origin'] = True
        new_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
        if kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__:
            new_caps = __salt__['pkg.list_provides'](**kwargs)
        else:
            new_caps = {}
        ok, failed = _verify_install(desired, new_pkgs,
                                     ignore_epoch=ignore_epoch,
                                     new_caps=new_caps)
        modified = [x for x in ok if x in targets]
        not_modified = [x for x in ok
                        if x not in targets
                        and x not in to_reinstall]
        failed = [x for x in failed if x in targets]

    # If there was nothing unpurged, just set the changes dict to the contents
    # of changes['installed'].
    if not changes.get('purge_desired'):
        changes = changes['installed']

    if modified:
        if sources:
            summary = ', '.join(modified)
        else:
            summary = ', '.join([_get_desired_pkg(x, desired)
                                 for x in modified])
        # NOTE(review): this branch keys off the length of the summary
        # *string*, while the "already installed" branch below keys off the
        # package *count* (<= 20) -- possibly unintended; confirm upstream.
        if len(summary) < 20:
            comment.append('The following packages were installed/updated: '
                           '{0}'.format(summary))
        else:
            comment.append(
                '{0} targeted package{1} {2} installed/updated.'.format(
                    len(modified),
                    's' if len(modified) > 1 else '',
                    'were' if len(modified) > 1 else 'was'
                )
            )

    if modified_hold:
        # Merge the hold changes into the per-package change entries.
        for i in modified_hold:
            change_name = i['name']
            if change_name in changes:
                comment.append(i['comment'])
                if changes[change_name]['new']:
                    changes[change_name]['new'] += '\n'
                changes[change_name]['new'] += '{0}'.format(i['changes']['new'])
                if changes[change_name]['old']:
                    changes[change_name]['old'] += '\n'
                changes[change_name]['old'] += '{0}'.format(i['changes']['old'])
            else:
                comment.append(i['comment'])
                changes[change_name] = {}
                changes[change_name]['new'] = '{0}'.format(i['changes']['new'])

    # Any requested packages that were not targeted for install or reinstall
    if not_modified:
        if sources:
            summary = ', '.join(not_modified)
        else:
            summary = ', '.join([_get_desired_pkg(x, desired)
                                 for x in not_modified])
        if len(not_modified) <= 20:
            comment.append('The following packages were already installed: '
                           '{0}'.format(summary))
        else:
            comment.append(
                '{0} targeted package{1} {2} already installed'.format(
                    len(not_modified),
                    's' if len(not_modified) > 1 else '',
                    'were' if len(not_modified) > 1 else 'was'
                )
            )

    if not_modified_hold:
        for i in not_modified_hold:
            comment.append(i['comment'])

    result = True

    if failed:
        if sources:
            summary = ', '.join(failed)
        else:
            summary = ', '.join([_get_desired_pkg(x, desired)
                                 for x in failed])
        comment.insert(0, 'The following packages failed to '
                          'install/update: {0}'.format(summary))
        result = False

    if failed_hold:
        for i in failed_hold:
            comment.append(i['comment'])
        result = False

    # Get the ignore_types list if any from the pkg_verify argument
    if isinstance(pkg_verify, list) \
            and any(x.get('ignore_types') is not None
                    for x in pkg_verify
                    if isinstance(x, _OrderedDict)
                    and 'ignore_types' in x):
        ignore_types = next(x.get('ignore_types')
                            for x in pkg_verify
                            if 'ignore_types' in x)
    else:
        ignore_types = []

    # Get the verify_options list if any from the pkg_verify argument
    if isinstance(pkg_verify, list) \
            and any(x.get('verify_options') is not None
                    for x in pkg_verify
                    if isinstance(x, _OrderedDict)
                    and 'verify_options' in x):
        verify_options = next(x.get('verify_options')
                              for x in pkg_verify
                              if 'verify_options' in x)
    else:
        verify_options = []

    # Rerun pkg.verify for packages in to_reinstall to determine failed
    modified = []
    failed = []
    for reinstall_pkg in to_reinstall:
        if reinstall:
            if reinstall_pkg in pkg_ret:
                modified.append(reinstall_pkg)
            else:
                failed.append(reinstall_pkg)
        elif pkg_verify:
            # No need to wrap this in a try/except because we would already
            # have caught invalid arguments earlier.
            verify_result = __salt__['pkg.verify'](reinstall_pkg,
                                                   ignore_types=ignore_types,
                                                   verify_options=verify_options,
                                                   **kwargs)
            if verify_result:
                failed.append(reinstall_pkg)
                altered_files[reinstall_pkg] = verify_result
            else:
                modified.append(reinstall_pkg)

    if modified:
        # Add a comment for each package in modified with its pkg.verify output
        for modified_pkg in modified:
            if sources:
                pkgstr = modified_pkg
            else:
                pkgstr = _get_desired_pkg(modified_pkg, desired)
            msg = 'Package {0} was reinstalled.'.format(pkgstr)
            if modified_pkg in altered_files:
                msg += ' The following files were remediated:'
                comment.append(msg)
                comment.append(_nested_output(altered_files[modified_pkg]))
            else:
                comment.append(msg)

    if failed:
        # Add a comment for each package in failed with its pkg.verify output
        for failed_pkg in failed:
            if sources:
                pkgstr = failed_pkg
            else:
                pkgstr = _get_desired_pkg(failed_pkg, desired)
            msg = ('Reinstall was not successful for package {0}.'
                   .format(pkgstr))
            if failed_pkg in altered_files:
                msg += ' The following files could not be remediated:'
                comment.append(msg)
                comment.append(_nested_output(altered_files[failed_pkg]))
            else:
                comment.append(msg)
        result = False

    ret = {'name': name,
           'changes': changes,
           'result': result,
           'comment': '\n'.join(comment)}
    if warnings:
        ret.setdefault('warnings', []).extend(warnings)
    return ret


def downloaded(name,
               version=None,
               pkgs=None,
               fromrepo=None,
               ignore_epoch=None,
               **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that the package is downloaded, and that it is the correct version
    (if specified).

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    :param str name:
        The name of the package to be downloaded. This parameter is ignored if
        either "pkgs" is used. Additionally, please note that this option can
        only be used to download packages from a software repository.

    :param str version:
        Download a specific version of a package.

        ..
        important::
            As of version 2015.8.7, for distros which use yum/dnf, packages
            which have a version with a nonzero epoch (that is, versions
            which start with a number followed by a colon must have the
            epoch included when specifying the version number. For example:

            .. code-block:: yaml

                vim-enhanced:
                  pkg.downloaded:
                    - version: 2:7.4.160-1.el7

            An **ignore_epoch** argument has been added to which causes the
            epoch to be disregarded when the state checks to see if the
            desired version was installed.

        You can install a specific version when using the ``pkgs`` argument by
        including the version after the package:

        .. code-block:: yaml

            common_packages:
              pkg.downloaded:
                - pkgs:
                  - unzip
                  - dos2unix
                  - salt-minion: 2015.8.5-1.el6

    :param bool resolve_capabilities:
        Turn on resolving capabilities. This allow one to name "provides" or
        alias names for packages.

        .. versionadded:: 2018.3.0

    CLI Example:

    .. code-block:: yaml

        zsh:
          pkg.downloaded:
            - version: 5.0.5-4.63
            - fromrepo: "myrepository"
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # Downloading is only possible on platforms whose pkg module implements
    # pkg.list_downloaded.
    if 'pkg.list_downloaded' not in __salt__:
        ret['result'] = False
        ret['comment'] = 'The pkg.downloaded state is not available on ' \
                         'this platform'
        return ret

    # An explicitly empty pkgs list means there is nothing to do.
    if not pkgs and isinstance(pkgs, list):
        ret['result'] = True
        ret['comment'] = 'No packages to download provided'
        return ret

    # If just a name (and optionally a version) is passed, just pack them into
    # the pkgs argument.
    if name and not pkgs:
        if version:
            pkgs = [{name: version}]
            version = None
        else:
            pkgs = [name]

    # It doesn't make sense here to received 'downloadonly' as kwargs
    # as we're explicitly passing 'downloadonly=True' to execution module.
    if 'downloadonly' in kwargs:
        del kwargs['downloadonly']

    pkgs, _refresh = _resolve_capabilities(pkgs, **kwargs)

    # Only downloading not yet downloaded packages
    targets = _find_download_targets(name,
                                     version,
                                     pkgs,
                                     fromrepo=fromrepo,
                                     ignore_epoch=ignore_epoch,
                                     **kwargs)
    # A dict with a 'result' key is a ready-made state return (nothing to do
    # or an error); any other non-dict value is unexpected.
    if isinstance(targets, dict) and 'result' in targets:
        return targets
    elif not isinstance(targets, dict):
        ret['result'] = False
        ret['comment'] = 'An error was encountered while checking targets: ' \
                         '{0}'.format(targets)
        return ret

    if __opts__['test']:
        summary = ', '.join(targets)
        ret['comment'] = 'The following packages would be ' \
                         'downloaded: {0}'.format(summary)
        return ret

    try:
        # Download (but do not install) via pkg.install with downloadonly=True.
        pkg_ret = __salt__['pkg.install'](name=name,
                                          pkgs=pkgs,
                                          version=version,
                                          downloadonly=True,
                                          fromrepo=fromrepo,
                                          ignore_epoch=ignore_epoch,
                                          **kwargs)
        ret['result'] = True
        ret['changes'].update(pkg_ret)
    except CommandExecutionError as exc:
        ret = {'name': name, 'result': False}
        if exc.info:
            # Get information for state return from the exception.
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = 'An error was encountered while downloading ' \
                             'package(s): {0}'.format(exc)
        return ret

    # Verify that every targeted package is now present in the download cache.
    new_pkgs = __salt__['pkg.list_downloaded'](**kwargs)
    ok, failed = _verify_install(targets, new_pkgs, ignore_epoch=ignore_epoch)

    if failed:
        summary = ', '.join([_get_desired_pkg(x, targets)
                             for x in failed])
        ret['result'] = False
        ret['comment'] = 'The following packages failed to ' \
                         'download: {0}'.format(summary)

    if not ret['changes'] and not ret['comment']:
        ret['result'] = True
        ret['comment'] = 'Packages are already downloaded: ' \
                         '{0}'.format(', '.join(targets))

    return ret


def patch_downloaded(name, advisory_ids=None, **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that packages related to certain advisory ids are downloaded.
Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` CLI Example: .. code-block:: yaml preparing-to-fix-issues: pkg.patch_downloaded: - advisory_ids: - SUSE-SLE-SERVER-12-SP2-2017-185 - SUSE-SLE-SERVER-12-SP2-2017-150 - SUSE-SLE-SERVER-12-SP2-2017-120 ''' if 'pkg.list_patches' not in __salt__: return {'name': name, 'result': False, 'changes': {}, 'comment': 'The pkg.patch_downloaded state is not available on ' 'this platform'} # It doesn't make sense here to received 'downloadonly' as kwargs # as we're explicitly passing 'downloadonly=True' to execution module. if 'downloadonly' in kwargs: del kwargs['downloadonly'] return patch_installed(name=name, advisory_ids=advisory_ids, downloadonly=True, **kwargs) def latest( name, refresh=None, fromrepo=None, skip_verify=False, pkgs=None, watch_flags=True, **kwargs): ''' Ensure that the named package is installed and the latest available package. If the package can be updated, this state function will update the package. Generally it is better for the :mod:`installed <salt.states.pkg.installed>` function to be used, as :mod:`latest <salt.states.pkg.latest>` will update the package whenever a new package is available. name The name of the package to maintain at the latest available version. This parameter is ignored if "pkgs" is used. fromrepo Specify a repository from which to install skip_verify Skip the GPG verification check for the package to be installed refresh This parameter controls whether or not the package repo database is updated prior to checking for the latest available version of the requested packages. If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before checking for the latest available version of the requested packages. If ``False``, the package database will *not* be refreshed before checking. 
If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.latest: - refresh: True - cache_valid_time: 300 In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 Multiple Package Installation Options: (Not yet supported for: FreeBSD, OpenBSD, MacOS, and Solaris pkgutil) pkgs A list of packages to maintain at the latest available version. .. code-block:: yaml mypkgs: pkg.latest: - pkgs: - foo - bar - baz install_recommends Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - install_recommends: False only_upgrade Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. 
report_reboot_exit_codes If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. code-block:: yaml ms vcpp installed: pkg.latest: - name: ms-vcpp - report_reboot_exit_codes: False ''' refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if kwargs.get('sources'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'The "sources" parameter is not supported.'} elif pkgs: desired_pkgs = list(_repack_pkgs(pkgs).keys()) if not desired_pkgs: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted "pkgs" parameter. See ' 'minion log.'} else: if not pkgs and isinstance(pkgs, list): return { 'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided' } else: desired_pkgs = [name] kwargs['saltenv'] = __env__ # check if capabilities should be checked and modify the requested packages # accordingly. 
desired_pkgs, refresh = _resolve_capabilities(desired_pkgs, refresh=refresh, **kwargs) try: avail = __salt__['pkg.latest_version'](*desired_pkgs, fromrepo=fromrepo, refresh=refresh, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking the ' 'newest available version of package(s): {0}' .format(exc)} try: cur = __salt__['pkg.version'](*desired_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': exc.strerror} # Repack the cur/avail data if only a single package is being checked if isinstance(cur, six.string_types): cur = {desired_pkgs[0]: cur} if isinstance(avail, six.string_types): avail = {desired_pkgs[0]: avail} targets = {} problems = [] for pkg in desired_pkgs: if not avail.get(pkg): # Package either a) is up-to-date, or b) does not exist if not cur.get(pkg): # Package does not exist msg = 'No information found for \'{0}\'.'.format(pkg) log.error(msg) problems.append(msg) elif watch_flags \ and __grains__.get('os_family') == 'Gentoo' \ and __salt__['portage_config.is_changed_uses'](pkg): # Package is up-to-date, but Gentoo USE flags are changing so # we need to add it to the targets targets[pkg] = cur[pkg] else: # Package either a) is not installed, or b) is installed and has an # upgrade available targets[pkg] = avail[pkg] if problems: return { 'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems) } if targets: # Find up-to-date packages if not pkgs: # There couldn't have been any up-to-date packages if this state # only targeted a single package and is being allowed to proceed to # the install step. 
up_to_date = [] else: up_to_date = [x for x in pkgs if x not in targets] if __opts__['test']: comments = [] comments.append( 'The following packages would be installed/upgraded: ' + ', '.join(sorted(targets)) ) if up_to_date: up_to_date_count = len(up_to_date) if up_to_date_count <= 10: comments.append( 'The following packages are already up-to-date: ' + ', '.join( ['{0} ({1})'.format(x, cur[x]) for x in sorted(up_to_date)] ) ) else: comments.append( '{0} packages are already up-to-date' .format(up_to_date_count) ) return {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comments)} if salt.utils.platform.is_windows(): # pkg.install execution module on windows ensures the software # package is installed when no version is specified, it does not # upgrade the software to the latest. This is per the design. # Build updated list of pkgs *with verion number*, exclude # non-targeted ones targeted_pkgs = [{x: targets[x]} for x in targets] else: # Build updated list of pkgs to exclude non-targeted ones targeted_pkgs = list(targets) # No need to refresh, if a refresh was necessary it would have been # performed above when pkg.latest_version was run. 
try: changes = __salt__['pkg.install'](name=None, refresh=False, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=targeted_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while installing ' 'package(s): {0}'.format(exc)} if changes: # Find failed and successful updates failed = [x for x in targets if not changes.get(x) or changes[x].get('new') != targets[x] and targets[x] != 'latest'] successful = [x for x in targets if x not in failed] comments = [] if failed: msg = 'The following packages failed to update: ' \ '{0}'.format(', '.join(sorted(failed))) comments.append(msg) if successful: msg = 'The following packages were successfully ' \ 'installed/upgraded: ' \ '{0}'.format(', '.join(sorted(successful))) comments.append(msg) if up_to_date: if len(up_to_date) <= 10: msg = 'The following packages were already up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: msg = '{0} packages were already up-to-date '.format( len(up_to_date)) comments.append(msg) return {'name': name, 'changes': changes, 'result': False if failed else True, 'comment': ' '.join(comments)} else: if len(targets) > 10: comment = ('{0} targeted packages failed to update. ' 'See debug log for details.'.format(len(targets))) elif len(targets) > 1: comment = ('The following targeted packages failed to update. ' 'See debug log for details: ({0}).' 
.format(', '.join(sorted(targets)))) else: comment = 'Package {0} failed to ' \ 'update.'.format(next(iter(list(targets.keys())))) if up_to_date: if len(up_to_date) <= 10: comment += ' The following packages were already ' \ 'up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: comment += '{0} packages were already ' \ 'up-to-date'.format(len(up_to_date)) return {'name': name, 'changes': changes, 'result': False, 'comment': comment} else: if len(desired_pkgs) > 10: comment = 'All {0} packages are up-to-date.'.format( len(desired_pkgs)) elif len(desired_pkgs) > 1: comment = 'All packages are up-to-date ' \ '({0}).'.format(', '.join(sorted(desired_pkgs))) else: comment = 'Package {0} is already ' \ 'up-to-date'.format(desired_pkgs[0]) return {'name': name, 'changes': {}, 'result': True, 'comment': comment} def _uninstall( action='remove', name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Common function for package removal ''' if action not in ('remove', 'purge'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalid action \'{0}\'. 
' 'This is probably a bug.'.format(action)} try: pkg_params = __salt__['pkg_resource.parse_targets']( name, pkgs, normalize=normalize)[0] except MinionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while parsing targets: ' '{0}'.format(exc)} targets = _find_remove_targets(name, version, pkgs, normalize, ignore_epoch=ignore_epoch, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, list): return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking targets: ' '{0}'.format(targets)} if action == 'purge': old_removed = __salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) targets.extend([x for x in pkg_params if x in old_removed]) targets.sort() if not targets: return {'name': name, 'changes': {}, 'result': True, 'comment': 'None of the targeted packages are installed' '{0}'.format(' or partially installed' if action == 'purge' else '')} if __opts__['test']: return {'name': name, 'changes': {}, 'result': None, 'comment': 'The following packages will be {0}d: ' '{1}.'.format(action, ', '.join(targets))} changes = __salt__['pkg.{0}'.format(action)](name, pkgs=pkgs, version=version, **kwargs) new = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) failed = [] for x in pkg_params: if __grains__['os_family'] in ['Suse', 'RedHat']: # Check if the package version set to be removed is actually removed: if x in new and not pkg_params[x]: failed.append(x) elif x in new and pkg_params[x] in new[x]: failed.append(x + "-" + pkg_params[x]) elif x in new: failed.append(x) if action == 'purge': new_removed = __salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) failed.extend([x for x in pkg_params if x in new_removed]) failed.sort() if failed: return {'name': name, 'changes': changes, 'result': False, 'comment': 'The following packages failed to {0}: ' '{1}.'.format(action, ', 
'.join(failed))} comments = [] not_installed = sorted([x for x in pkg_params if x not in targets]) if not_installed: comments.append('The following packages were not installed: ' '{0}'.format(', '.join(not_installed))) comments.append('The following packages were {0}d: ' '{1}.'.format(action, ', '.join(targets))) else: comments.append('All targeted packages were {0}d.'.format(action)) return {'name': name, 'changes': changes, 'result': True, 'comment': ' '.join(comments)} def removed(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.remove`` if necessary to remove the package. name The name of the package to be removed. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.removed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to remove. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='remove', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while removing ' 'package(s): {0}'.format(exc)) return ret def purged(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.purge`` if necessary to purge the package. 
All configuration files are also removed. name The name of the package to be purged. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. 
code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.purged: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to purge. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='purge', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while purging ' 'package(s): {0}'.format(exc)) return ret def uptodate(name, refresh=False, pkgs=None, **kwargs): ''' .. versionadded:: 2014.7.0 .. versionchanged:: 2018.3.0 Added support for the ``pkgin`` provider. Verify that the system is completely up to date. name The name has no functional value and is only used as a tracking reference refresh refresh the package database before checking for new upgrades pkgs list of packages to upgrade :param str cache_valid_time: This parameter sets the value in seconds after which cache marked as invalid, and cache update is necessary. This overwrite ``refresh`` parameter default behavior. In this case cache_valid_time is set, refresh will not take place for amount in seconds since last ``apt-get update`` executed on the system. .. 
note:: This parameter available only on Debian based distributions, and have no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 kwargs Any keyword arguments to pass through to ``pkg.upgrade``. .. versionadded:: 2015.5.0 ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'Failed to update'} if 'pkg.list_upgrades' not in __salt__: ret['comment'] = 'State pkg.uptodate is not available' return ret # emerge --update doesn't appear to support repo notation if 'fromrepo' in kwargs and __grains__['os_family'] == 'Gentoo': ret['comment'] = '\'fromrepo\' argument not supported on this platform' return ret if isinstance(refresh, bool): pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) try: packages = __salt__['pkg.list_upgrades'](refresh=refresh, **kwargs) expected = {pkgname: {'new': pkgver, 'old': __salt__['pkg.version'](pkgname, **kwargs)} for pkgname, pkgver in six.iteritems(packages)} if isinstance(pkgs, list): packages = [pkg for pkg in packages if pkg in pkgs] expected = {pkgname: pkgver for pkgname, pkgver in six.iteritems(expected) if pkgname in pkgs} except Exception as exc: ret['comment'] = six.text_type(exc) return ret else: ret['comment'] = 'refresh must be either True or False' return ret if not packages: ret['comment'] = 'System is already up-to-date' ret['result'] = True return ret elif __opts__['test']: ret['comment'] = 'System update will be performed' ret['changes'] = expected ret['result'] = None return ret try: ret['changes'] = __salt__['pkg.upgrade'](refresh=refresh, pkgs=pkgs, **kwargs) except CommandExecutionError as exc: if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while updating ' 'packages: {0}'.format(exc)) return ret # If a package list was provided, ensure those packages were updated missing = [] if isinstance(pkgs, list): missing = [pkg for pkg in six.iterkeys(expected) if pkg not in ret['changes']] if missing: ret['comment'] = 'The following package(s) failed to update: {0}'.format(', '.join(missing)) ret['result'] = False else: ret['comment'] = 'Upgrade ran successfully' ret['result'] = True return ret def group_installed(name, skip=None, include=None, **kwargs): ''' .. versionadded:: 2015.8.0 .. versionchanged:: 2016.11.0 Added support in :mod:`pacman <salt.modules.pacman>` Ensure that an entire package group is installed. This state is currently only supported for the :mod:`yum <salt.modules.yumpkg>` and :mod:`pacman <salt.modules.pacman>` package managers. skip Packages that would normally be installed by the package group ("default" packages), which should not be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - skip: - piranha include Packages which are included in a group, which would not normally be installed by a ``yum groupinstall`` ("optional" packages). Note that this will not enforce group membership; if you include packages which are not members of the specified groups, they will still be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - include: - haproxy .. versionchanged:: 2016.3.0 This option can no longer be passed as a comma-separated list, it must now be passed as a list (as shown in the above example). .. note:: Because this is essentially a wrapper around :py:func:`pkg.install <salt.modules.yumpkg.install>`, any argument which can be passed to pkg.install may also be included here, and it will be passed on to the call to :py:func:`pkg.install <salt.modules.yumpkg.install>`. 
''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} if 'pkg.group_diff' not in __salt__: ret['comment'] = 'pkg.group_install not available for this platform' return ret if skip is None: skip = [] else: if not isinstance(skip, list): ret['comment'] = 'skip must be formatted as a list' return ret for idx, item in enumerate(skip): if not isinstance(item, six.string_types): skip[idx] = six.text_type(item) if include is None: include = [] else: if not isinstance(include, list): ret['comment'] = 'include must be formatted as a list' return ret for idx, item in enumerate(include): if not isinstance(item, six.string_types): include[idx] = six.text_type(item) try: diff = __salt__['pkg.group_diff'](name) except CommandExecutionError as err: ret['comment'] = ('An error was encountered while installing/updating ' 'group \'{0}\': {1}.'.format(name, err)) return ret mandatory = diff['mandatory']['installed'] + \ diff['mandatory']['not installed'] invalid_skip = [x for x in mandatory if x in skip] if invalid_skip: ret['comment'] = ( 'The following mandatory packages cannot be skipped: {0}' .format(', '.join(invalid_skip)) ) return ret targets = diff['mandatory']['not installed'] targets.extend([x for x in diff['default']['not installed'] if x not in skip]) targets.extend(include) if not targets: ret['result'] = True ret['comment'] = 'Group \'{0}\' is already installed'.format(name) return ret partially_installed = diff['mandatory']['installed'] \ or diff['default']['installed'] \ or diff['optional']['installed'] if __opts__['test']: ret['result'] = None if partially_installed: ret['comment'] = ( 'Group \'{0}\' is partially installed and will be updated' .format(name) ) else: ret['comment'] = 'Group \'{0}\' will be installed'.format(name) return ret try: ret['changes'] = __salt__['pkg.install'](pkgs=targets, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while ' 'installing/updating group \'{0}\': {1}' .format(name, exc)) return ret failed = [x for x in targets if x not in __salt__['pkg.list_pkgs'](**kwargs)] if failed: ret['comment'] = ( 'Failed to install the following packages: {0}' .format(', '.join(failed)) ) return ret ret['result'] = True ret['comment'] = 'Group \'{0}\' was {1}'.format( name, 'updated' if partially_installed else 'installed' ) return ret def mod_init(low): ''' Set a flag to tell the install functions to refresh the package database. This ensures that the package database is refreshed only once during a state run significantly improving the speed of package management during a state run. It sets a flag for a number of reasons, primarily due to timeline logic. When originally setting up the mod_init for pkg a number of corner cases arose with different package managers and how they refresh package data. It also runs the "ex_mod_init" from the package manager module that is currently loaded. The "ex_mod_init" is expected to work as a normal "mod_init" function. .. 
seealso:: :py:func:`salt.modules.ebuild.ex_mod_init` ''' ret = True if 'pkg.ex_mod_init' in __salt__: ret = __salt__['pkg.ex_mod_init'](low) if low['fun'] == 'installed' or low['fun'] == 'latest': salt.utils.pkg.write_rtag(__opts__) return ret return False def mod_aggregate(low, chunks, running): ''' The mod_aggregate function which looks up all packages in the available low chunks and merges them into a single pkgs ref in the present low data ''' pkgs = [] pkg_type = None agg_enabled = [ 'installed', 'latest', 'removed', 'purged', ] if low.get('fun') not in agg_enabled: return low for chunk in chunks: tag = __utils__['state.gen_tag'](chunk) if tag in running: # Already ran the pkg state, skip aggregation continue if chunk.get('state') == 'pkg': if '__agg__' in chunk: continue # Check for the same function if chunk.get('fun') != low.get('fun'): continue # Check for the same repo if chunk.get('fromrepo') != low.get('fromrepo'): continue # Check first if 'sources' was passed so we don't aggregate pkgs # and sources together. if 'sources' in chunk: if pkg_type is None: pkg_type = 'sources' if pkg_type == 'sources': pkgs.extend(chunk['sources']) chunk['__agg__'] = True else: if pkg_type is None: pkg_type = 'pkgs' if pkg_type == 'pkgs': # Pull out the pkg names! if 'pkgs' in chunk: pkgs.extend(chunk['pkgs']) chunk['__agg__'] = True elif 'name' in chunk: version = chunk.pop('version', None) if version is not None: pkgs.append({chunk['name']: version}) else: pkgs.append(chunk['name']) chunk['__agg__'] = True if pkg_type is not None and pkgs: if pkg_type in low: low[pkg_type].extend(pkgs) else: low[pkg_type] = pkgs return low def mod_watch(name, **kwargs): ''' Install/reinstall a package based on a watch requisite .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered. 
''' sfun = kwargs.pop('sfun', None) mapfun = {'purged': purged, 'latest': latest, 'removed': removed, 'installed': installed} if sfun in mapfun: return mapfun[sfun](name, **kwargs) return {'name': name, 'changes': {}, 'comment': 'pkg.{0} does not work with the watch requisite'.format(sfun), 'result': False}
saltstack/salt
salt/states/pkg.py
patch_downloaded
python
def patch_downloaded(name, advisory_ids=None, **kwargs): ''' .. versionadded:: 2017.7.0 Ensure that packages related to certain advisory ids are downloaded. Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` CLI Example: .. code-block:: yaml preparing-to-fix-issues: pkg.patch_downloaded: - advisory_ids: - SUSE-SLE-SERVER-12-SP2-2017-185 - SUSE-SLE-SERVER-12-SP2-2017-150 - SUSE-SLE-SERVER-12-SP2-2017-120 ''' if 'pkg.list_patches' not in __salt__: return {'name': name, 'result': False, 'changes': {}, 'comment': 'The pkg.patch_downloaded state is not available on ' 'this platform'} # It doesn't make sense here to received 'downloadonly' as kwargs # as we're explicitly passing 'downloadonly=True' to execution module. if 'downloadonly' in kwargs: del kwargs['downloadonly'] return patch_installed(name=name, advisory_ids=advisory_ids, downloadonly=True, **kwargs)
.. versionadded:: 2017.7.0 Ensure that packages related to certain advisory ids are downloaded. Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` CLI Example: .. code-block:: yaml preparing-to-fix-issues: pkg.patch_downloaded: - advisory_ids: - SUSE-SLE-SERVER-12-SP2-2017-185 - SUSE-SLE-SERVER-12-SP2-2017-150 - SUSE-SLE-SERVER-12-SP2-2017-120
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pkg.py#L2264-L2295
[ "def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs):\n '''\n .. versionadded:: 2017.7.0\n\n Ensure that packages related to certain advisory ids are installed.\n\n Currently supported for the following pkg providers:\n :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`\n\n CLI Example:\n\n .. code-block:: yaml\n\n issue-foo-fixed:\n pkg.patch_installed:\n - advisory_ids:\n - SUSE-SLE-SERVER-12-SP2-2017-185\n - SUSE-SLE-SERVER-12-SP2-2017-150\n - SUSE-SLE-SERVER-12-SP2-2017-120\n '''\n ret = {'name': name,\n 'changes': {},\n 'result': None,\n 'comment': ''}\n\n if 'pkg.list_patches' not in __salt__:\n ret['result'] = False\n ret['comment'] = 'The pkg.patch_installed state is not available on ' \\\n 'this platform'\n return ret\n\n if not advisory_ids and isinstance(advisory_ids, list):\n ret['result'] = True\n ret['comment'] = 'No advisory ids provided'\n return ret\n\n # Only downloading not yet downloaded packages\n targets = _find_advisory_targets(name, advisory_ids, **kwargs)\n if isinstance(targets, dict) and 'result' in targets:\n return targets\n elif not isinstance(targets, list):\n ret['result'] = False\n ret['comment'] = 'An error was encountered while checking targets: ' \\\n '{0}'.format(targets)\n return ret\n\n if __opts__['test']:\n summary = ', '.join(targets)\n ret['comment'] = 'The following advisory patches would be ' \\\n 'downloaded: {0}'.format(summary)\n return ret\n\n try:\n pkg_ret = __salt__['pkg.install'](name=name,\n advisory_ids=advisory_ids,\n downloadonly=downloadonly,\n **kwargs)\n ret['result'] = True\n ret['changes'].update(pkg_ret)\n except CommandExecutionError as exc:\n ret = {'name': name, 'result': False}\n if exc.info:\n # Get information for state return from the exception.\n ret['changes'] = exc.info.get('changes', {})\n ret['comment'] = exc.strerror_without_changes\n else:\n ret['changes'] = {}\n ret['comment'] = ('An error was encountered while downloading '\n 'package(s): 
{0}'.format(exc))\n return ret\n\n if not ret['changes'] and not ret['comment']:\n status = 'downloaded' if downloadonly else 'installed'\n ret['result'] = True\n ret['comment'] = ('Advisory patch is not needed or related packages '\n 'are already {0}'.format(status))\n\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Installation of packages using OS package managers such as yum or apt-get ========================================================================= .. note:: On minions running systemd>=205, as of version 2015.8.12, 2016.3.3, and 2016.11.0, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing the package manager commands spawned by Salt, when Salt updates itself (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.use_scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Salt can manage software packages via the pkg state module, packages can be set up to be installed, latest, removed and purged. Package management declarations are typically rather simple: .. code-block:: yaml vim: pkg.installed A more involved example involves pulling from a custom repository. .. code-block:: yaml base: pkgrepo.managed: - humanname: Logstash PPA - name: ppa:wolfnet/logstash - dist: precise - file: /etc/apt/sources.list.d/logstash.list - keyid: 28B04E4A - keyserver: keyserver.ubuntu.com logstash: pkg.installed: - fromrepo: ppa:wolfnet/logstash Multiple packages can also be installed with the use of the pkgs state module .. code-block:: yaml dotdeb.repo: pkgrepo.managed: - humanname: Dotdeb - name: deb http://packages.dotdeb.org wheezy-php55 all - dist: wheezy-php55 - file: /etc/apt/sources.list.d/dotbeb.list - keyid: 89DF5277 - keyserver: keys.gnupg.net - refresh_db: true php.packages: pkg.installed: - fromrepo: wheezy-php55 - pkgs: - php5-fpm - php5-cli - php5-curl .. 
warning:: Package names are currently case-sensitive. If the minion is using a package manager which is not case-sensitive (such as :mod:`pkgng <salt.modules.pkgng>`), then this state will fail if the proper case is not used. This will be addressed in a future release of Salt. ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import fnmatch import logging import os import re # Import Salt libs import salt.utils.pkg import salt.utils.platform import salt.utils.versions from salt.output import nested from salt.utils.functools import namespaced_function as _namespaced_function from salt.utils.odict import OrderedDict as _OrderedDict from salt.exceptions import ( CommandExecutionError, MinionError, SaltInvocationError ) from salt.modules.pkg_resource import _repack_pkgs # Import 3rd-party libs from salt.ext import six # pylint: disable=invalid-name _repack_pkgs = _namespaced_function(_repack_pkgs, globals()) if salt.utils.platform.is_windows(): # pylint: disable=import-error,no-name-in-module,unused-import from salt.ext.six.moves.urllib.parse import urlparse as _urlparse from salt.exceptions import SaltRenderError import collections import datetime import errno import time from functools import cmp_to_key # pylint: disable=import-error # pylint: enable=unused-import from salt.modules.win_pkg import _get_package_info from salt.modules.win_pkg import get_repo_data from salt.modules.win_pkg import _get_repo_details from salt.modules.win_pkg import _refresh_db_conditional from salt.modules.win_pkg import refresh_db from salt.modules.win_pkg import genrepo from salt.modules.win_pkg import _repo_process_pkg_sls from salt.modules.win_pkg import _get_latest_pkg_version from salt.modules.win_pkg import _reverse_cmp_pkg_versions _get_package_info = _namespaced_function(_get_package_info, globals()) get_repo_data = _namespaced_function(get_repo_data, globals()) _get_repo_details = \ _namespaced_function(_get_repo_details, globals()) 
_refresh_db_conditional = \ _namespaced_function(_refresh_db_conditional, globals()) refresh_db = _namespaced_function(refresh_db, globals()) genrepo = _namespaced_function(genrepo, globals()) _repo_process_pkg_sls = \ _namespaced_function(_repo_process_pkg_sls, globals()) _get_latest_pkg_version = \ _namespaced_function(_get_latest_pkg_version, globals()) _reverse_cmp_pkg_versions = \ _namespaced_function(_reverse_cmp_pkg_versions, globals()) # The following imports are used by the namespaced win_pkg funcs # and need to be included in their globals. # pylint: disable=import-error,unused-import from salt.utils.versions import LooseVersion # pylint: enable=import-error,unused-import # pylint: enable=invalid-name log = logging.getLogger(__name__) def __virtual__(): ''' Only make these states available if a pkg provider has been detected or assigned for this minion ''' return 'pkg.install' in __salt__ def _get_comparison_spec(pkgver): ''' Return a tuple containing the comparison operator and the version. If no comparison operator was passed, the comparison is assumed to be an "equals" comparison, and "==" will be the operator returned. ''' oper, verstr = salt.utils.pkg.split_comparison(pkgver.strip()) if oper in ('=', ''): oper = '==' return oper, verstr def _parse_version_string(version_conditions_string): ''' Returns a list of two-tuples containing (operator, version). ''' result = [] version_conditions_string = version_conditions_string.strip() if not version_conditions_string: return result for version_condition in version_conditions_string.split(','): operator_and_version = _get_comparison_spec(version_condition) result.append(operator_and_version) return result def _fulfills_version_string(installed_versions, version_conditions_string, ignore_epoch=False, allow_updates=False): ''' Returns True if any of the installed versions match the specified version conditions, otherwise returns False. 
installed_versions The installed versions version_conditions_string The string containing all version conditions. E.G. 1.2.3-4 >=1.2.3-4 >=1.2.3-4, <2.3.4-5 >=1.2.3-4, <2.3.4-5, !=1.2.4-1 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. allow_updates : False Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. (Only applicable if only one strict version condition is specified E.G. version: 2.0.6~ubuntu3) ''' version_conditions = _parse_version_string(version_conditions_string) for installed_version in installed_versions: fullfills_all = True for operator, version_string in version_conditions: if allow_updates and len(version_conditions) == 1 and operator == '==': operator = '>=' fullfills_all = fullfills_all and _fulfills_version_spec([installed_version], operator, version_string, ignore_epoch=ignore_epoch) if fullfills_all: return True return False def _fulfills_version_spec(versions, oper, desired_version, ignore_epoch=False): ''' Returns True if any of the installed versions match the specified version, otherwise returns False ''' cmp_func = __salt__.get('pkg.version_cmp') # stripping "with_origin" dict wrapper if salt.utils.platform.is_freebsd(): if isinstance(versions, dict) and 'version' in versions: versions = versions['version'] for ver in versions: if (oper == '==' and fnmatch.fnmatch(ver, desired_version)) \ or salt.utils.versions.compare(ver1=ver, oper=oper, ver2=desired_version, cmp_func=cmp_func, ignore_epoch=ignore_epoch): return True return False def _find_unpurge_targets(desired, **kwargs): ''' Find packages which are marked to be purged but can't yet be removed because they are dependencies for 
other installed packages. These are the packages which will need to be 'unpurged' because they are part of pkg.installed states. This really just applies to Debian-based Linuxes. ''' return [ x for x in desired if x in __salt__['pkg.list_pkgs'](purge_desired=True, **kwargs) ] def _find_download_targets(name=None, version=None, pkgs=None, normalize=True, skip_suggestions=False, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.downloaded and discover what packages need to be downloaded. Return a dict of packages to download. ''' cur_pkgs = __salt__['pkg.list_downloaded'](**kwargs) if pkgs: to_download = _repack_pkgs(pkgs, normalize=normalize) if not to_download: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. See ' 'minion log.'} else: if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_download = {_normalize_name(name): version} else: to_download = {name: version} cver = cur_pkgs.get(name, {}) if name in to_download: # Package already downloaded, no need to download again if cver and version in cver: return {'name': name, 'changes': {}, 'result': True, 'comment': 'Version {0} of package \'{1}\' is already ' 'downloaded'.format(version, name)} # if cver is not an empty string, the package is already downloaded elif cver and version is None: # The package is downloaded return {'name': name, 'changes': {}, 'result': True, 'comment': 'Package {0} is already ' 'downloaded'.format(name)} version_spec = False if not skip_suggestions: try: problems = _preflight_check(to_download, **kwargs) except CommandExecutionError: pass else: comments = [] if problems.get('no_suggest'): comments.append( 'The following package(s) were not found, and no ' 'possible matches were found in the package db: ' '{0}'.format( ', '.join(sorted(problems['no_suggest'])) ) ) if problems.get('suggest'): for pkgname, suggestions in \ six.iteritems(problems['suggest']): 
comments.append( 'Package \'{0}\' not found (possible matches: ' '{1})'.format(pkgname, ', '.join(suggestions)) ) if comments: if len(comments) > 1: comments.append('') return {'name': name, 'changes': {}, 'result': False, 'comment': '. '.join(comments).rstrip()} # Find out which packages will be targeted in the call to pkg.download # Check current downloaded versions against specified versions targets = {} problems = [] for pkgname, pkgver in six.iteritems(to_download): cver = cur_pkgs.get(pkgname, {}) # Package not yet downloaded, so add to targets if not cver: targets[pkgname] = pkgver continue # No version specified but package is already downloaded elif cver and not pkgver: continue version_spec = True try: if not _fulfills_version_string(cver.keys(), pkgver, ignore_epoch=ignore_epoch): targets[pkgname] = pkgver except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already downloaded msg = ( 'All specified packages{0} are already downloaded' .format(' (matching specified versions)' if version_spec else '') ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_advisory_targets(name=None, advisory_ids=None, **kwargs): ''' Inspect the arguments to pkg.patch_installed and discover what advisory patches need to be installed. Return a dict of advisory patches to install. 
''' cur_patches = __salt__['pkg.list_installed_patches'](**kwargs) if advisory_ids: to_download = advisory_ids else: to_download = [name] if cur_patches.get(name, {}): # Advisory patch already installed, no need to install it again return {'name': name, 'changes': {}, 'result': True, 'comment': 'Advisory patch {0} is already ' 'installed'.format(name)} # Find out which advisory patches will be targeted in the call to pkg.install targets = [] for patch_name in to_download: cver = cur_patches.get(patch_name, {}) # Advisory patch not yet installed, so add to targets if not cver: targets.append(patch_name) continue if not targets: # All specified packages are already downloaded msg = ('All specified advisory patches are already installed') return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_remove_targets(name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.removed and discover what packages need to be removed. Return a dict of packages to remove. ''' if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if pkgs: to_remove = _repack_pkgs(pkgs, normalize=normalize) if not to_remove: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. 
See ' 'minion log.'} else: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_remove = {_normalize_name(name): version} version_spec = False # Find out which packages will be targeted in the call to pkg.remove # Check current versions against specified versions targets = [] problems = [] for pkgname, pkgver in six.iteritems(to_remove): # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names origin = bool(re.search('/', pkgname)) if __grains__['os'] == 'FreeBSD' and origin: cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == pkgname] else: cver = cur_pkgs.get(pkgname, []) # Package not installed, no need to remove if not cver: continue # No version specified and pkg is installed elif __salt__['pkg_resource.version_clean'](pkgver) is None: targets.append(pkgname) continue version_spec = True try: if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch): targets.append(pkgname) else: log.debug( 'Current version (%s) did not match desired version ' 'specification (%s), will not remove', cver, pkgver ) except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already absent msg = 'All specified packages{0} are already absent'.format( ' (matching specified versions)' if version_spec else '' ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_install_targets(name=None, version=None, pkgs=None, sources=None, skip_suggestions=False, pkg_verify=False, normalize=True, ignore_epoch=False, reinstall=False, refresh=False, **kwargs): ''' Inspect the arguments to pkg.installed and discover what packages need to be installed. 
Return a dict of desired packages ''' was_refreshed = False if all((pkgs, sources)): return {'name': name, 'changes': {}, 'result': False, 'comment': 'Only one of "pkgs" and "sources" is permitted.'} # dict for packages that fail pkg.verify and their altered files altered_files = {} # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True if salt.utils.platform.is_windows(): # Windows requires a refresh to establish a pkg db if refresh=True, so # add it to the kwargs. 
kwargs['refresh'] = refresh resolve_capabilities = kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__ try: cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) cur_prov = resolve_capabilities and __salt__['pkg.list_provides'](**kwargs) or dict() except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': exc.strerror} if salt.utils.platform.is_windows() and kwargs.pop('refresh', False): # We already refreshed when we called pkg.list_pkgs was_refreshed = True refresh = False if any((pkgs, sources)): if pkgs: desired = _repack_pkgs(pkgs, normalize=normalize) elif sources: desired = __salt__['pkg_resource.pack_sources']( sources, normalize=normalize, ) if not desired: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted \'{0}\' parameter. See ' 'minion log.'.format('pkgs' if pkgs else 'sources')} to_unpurge = _find_unpurge_targets(desired, **kwargs) else: if salt.utils.platform.is_windows(): pkginfo = _get_package_info(name, saltenv=kwargs['saltenv']) if not pkginfo: return {'name': name, 'changes': {}, 'result': False, 'comment': 'Package {0} not found in the ' 'repository.'.format(name)} if version is None: version = _get_latest_pkg_version(pkginfo) if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) desired = {_normalize_name(name): version} else: desired = {name: version} to_unpurge = _find_unpurge_targets(desired, **kwargs) # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names origin = bool(re.search('/', name)) if __grains__['os'] == 'FreeBSD' and origin: cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == name] else: cver = cur_pkgs.get(name, []) if name not in to_unpurge: if version and version in cver \ and not reinstall \ and not pkg_verify: # The package is installed and is the correct version return {'name': name, 'changes': {}, 'result': True, 
'comment': 'Version {0} of package \'{1}\' is already ' 'installed'.format(version, name)} # if cver is not an empty string, the package is already installed elif cver and version is None \ and not reinstall \ and not pkg_verify: # The package is installed return {'name': name, 'changes': {}, 'result': True, 'comment': 'Package {0} is already ' 'installed'.format(name)} version_spec = False if not sources: # Check for alternate package names if strict processing is not # enforced. Takes extra time. Disable for improved performance if not skip_suggestions: # Perform platform-specific pre-flight checks not_installed = dict([ (name, version) for name, version in desired.items() if not (name in cur_pkgs and (version is None or _fulfills_version_string(cur_pkgs[name], version))) ]) if not_installed: try: problems = _preflight_check(not_installed, **kwargs) except CommandExecutionError: pass else: comments = [] if problems.get('no_suggest'): comments.append( 'The following package(s) were not found, and no ' 'possible matches were found in the package db: ' '{0}'.format( ', '.join(sorted(problems['no_suggest'])) ) ) if problems.get('suggest'): for pkgname, suggestions in \ six.iteritems(problems['suggest']): comments.append( 'Package \'{0}\' not found (possible matches: ' '{1})'.format(pkgname, ', '.join(suggestions)) ) if comments: if len(comments) > 1: comments.append('') return {'name': name, 'changes': {}, 'result': False, 'comment': '. '.join(comments).rstrip()} # Resolve the latest package version for any packages with "latest" in the # package version wants_latest = [] \ if sources \ else [x for x, y in six.iteritems(desired) if y == 'latest'] if wants_latest: resolved_latest = __salt__['pkg.latest_version'](*wants_latest, refresh=refresh, **kwargs) if len(wants_latest) == 1: resolved_latest = {wants_latest[0]: resolved_latest} if refresh: was_refreshed = True refresh = False # pkg.latest_version returns an empty string when the package is # up-to-date. 
So check the currently-installed packages. If found, the # resolved latest version will be the currently installed one from # cur_pkgs. If not found, then the package doesn't exist and the # resolved latest version will be None. for key in resolved_latest: if not resolved_latest[key]: if key in cur_pkgs: resolved_latest[key] = cur_pkgs[key][-1] else: resolved_latest[key] = None # Update the desired versions with the ones we resolved desired.update(resolved_latest) # Find out which packages will be targeted in the call to pkg.install targets = {} to_reinstall = {} problems = [] warnings = [] failed_verify = False for package_name, version_string in six.iteritems(desired): cver = cur_pkgs.get(package_name, []) if resolve_capabilities and not cver and package_name in cur_prov: cver = cur_pkgs.get(cur_prov.get(package_name)[0], []) # Package not yet installed, so add to targets if not cver: targets[package_name] = version_string continue if sources: if reinstall: to_reinstall[package_name] = version_string continue elif 'lowpkg.bin_pkg_info' not in __salt__: continue # Metadata parser is available, cache the file and derive the # package's name and version err = 'Unable to cache {0}: {1}' try: cached_path = __salt__['cp.cache_file'](version_string, saltenv=kwargs['saltenv']) except CommandExecutionError as exc: problems.append(err.format(version_string, exc)) continue if not cached_path: problems.append(err.format(version_string, 'file not found')) continue elif not os.path.exists(cached_path): problems.append('{0} does not exist on minion'.format(version_string)) continue source_info = __salt__['lowpkg.bin_pkg_info'](cached_path) if source_info is None: warnings.append('Failed to parse metadata for {0}'.format(version_string)) continue else: verstr = source_info['version'] else: verstr = version_string if reinstall: to_reinstall[package_name] = version_string continue if not __salt__['pkg_resource.check_extra_requirements'](package_name, version_string): 
targets[package_name] = version_string continue # No version specified and pkg is installed elif __salt__['pkg_resource.version_clean'](version_string) is None: if (not reinstall) and pkg_verify: try: verify_result = __salt__['pkg.verify']( package_name, ignore_types=ignore_types, verify_options=verify_options, **kwargs ) except (CommandExecutionError, SaltInvocationError) as exc: failed_verify = exc.strerror continue if verify_result: to_reinstall[package_name] = version_string altered_files[package_name] = verify_result continue version_fulfilled = False allow_updates = bool(not sources and kwargs.get('allow_updates')) try: version_fulfilled = _fulfills_version_string(cver, verstr, ignore_epoch=ignore_epoch, allow_updates=allow_updates) except CommandExecutionError as exc: problems.append(exc.strerror) continue # Compare desired version against installed version. version_spec = True if not version_fulfilled: if reinstall: to_reinstall[package_name] = version_string else: version_conditions = _parse_version_string(version_string) if pkg_verify and any(oper == '==' for oper, version in version_conditions): try: verify_result = __salt__['pkg.verify']( package_name, ignore_types=ignore_types, verify_options=verify_options, **kwargs ) except (CommandExecutionError, SaltInvocationError) as exc: failed_verify = exc.strerror continue if verify_result: to_reinstall[package_name] = version_string altered_files[package_name] = verify_result else: log.debug( 'Current version (%s) did not match desired version ' 'specification (%s), adding to installation targets', cver, version_string ) targets[package_name] = version_string if failed_verify: problems.append(failed_verify) if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not any((targets, to_unpurge, to_reinstall)): # All specified packages are installed msg = 'All specified packages are already installed{0}' msg = msg.format( ' and are at the desired version' if 
version_spec and not sources else '' ) ret = {'name': name, 'changes': {}, 'result': True, 'comment': msg} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret return (desired, targets, to_unpurge, to_reinstall, altered_files, warnings, was_refreshed) def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None): ''' Determine whether or not the installed packages match what was requested in the SLS file. ''' ok = [] failed = [] if not new_caps: new_caps = dict() for pkgname, pkgver in desired.items(): # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names. # Homebrew for Mac OSX does something similar with tap names # prefixing package names, separated with a slash. has_origin = '/' in pkgname if __grains__['os'] == 'FreeBSD' and has_origin: cver = [k for k, v in six.iteritems(new_pkgs) if v['origin'] == pkgname] elif __grains__['os'] == 'MacOS' and has_origin: cver = new_pkgs.get(pkgname, new_pkgs.get(pkgname.split('/')[-1])) elif __grains__['os'] == 'OpenBSD': cver = new_pkgs.get(pkgname.split('%')[0]) elif __grains__['os_family'] == 'Debian': cver = new_pkgs.get(pkgname.split('=')[0]) else: cver = new_pkgs.get(pkgname) if not cver and pkgname in new_caps: cver = new_pkgs.get(new_caps.get(pkgname)[0]) if not cver: failed.append(pkgname) continue elif pkgver == 'latest': ok.append(pkgname) continue elif not __salt__['pkg_resource.version_clean'](pkgver): ok.append(pkgname) continue elif pkgver.endswith("*") and cver[0].startswith(pkgver[:-1]): ok.append(pkgname) continue if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch): ok.append(pkgname) else: failed.append(pkgname) return ok, failed def _get_desired_pkg(name, desired): ''' Helper function that retrieves and nicely formats the desired pkg (and version if specified) so that helpful information can be printed in the comment for the state. 
''' if not desired[name] or desired[name].startswith(('<', '>', '=')): oper = '' else: oper = '=' return '{0}{1}{2}'.format(name, oper, '' if not desired[name] else desired[name]) def _preflight_check(desired, fromrepo, **kwargs): ''' Perform platform-specific checks on desired packages ''' if 'pkg.check_db' not in __salt__: return {} ret = {'suggest': {}, 'no_suggest': []} pkginfo = __salt__['pkg.check_db']( *list(desired.keys()), fromrepo=fromrepo, **kwargs ) for pkgname in pkginfo: if pkginfo[pkgname]['found'] is False: if pkginfo[pkgname]['suggestions']: ret['suggest'][pkgname] = pkginfo[pkgname]['suggestions'] else: ret['no_suggest'].append(pkgname) return ret def _nested_output(obj): ''' Serialize obj and format for output ''' nested.__opts__ = __opts__ ret = nested.output(obj).rstrip() return ret def _resolve_capabilities(pkgs, refresh=False, **kwargs): ''' Resolve capabilities in ``pkgs`` and exchange them with real package names, when the result is distinct. This feature can be turned on while setting the paramter ``resolve_capabilities`` to True. Return the input dictionary with replaced capability names and as second return value a bool which say if a refresh need to be run. In case of ``resolve_capabilities`` is False (disabled) or not supported by the implementation the input is returned unchanged. ''' if not pkgs or 'pkg.resolve_capabilities' not in __salt__: return pkgs, refresh ret = __salt__['pkg.resolve_capabilities'](pkgs, refresh=refresh, **kwargs) return ret, False def installed( name, version=None, refresh=None, fromrepo=None, skip_verify=False, skip_suggestions=False, pkgs=None, sources=None, allow_updates=False, pkg_verify=False, normalize=True, ignore_epoch=False, reinstall=False, update_holds=False, bypass_file=None, bypass_file_contains=None, **kwargs): ''' Ensure that the package is installed, and that it is the correct version (if specified). :param str name: The name of the package to be installed. 
This parameter is ignored if either "pkgs" or "sources" is used. Additionally, please note that this option can only be used to install packages from a software repository. To install a package file manually, use the "sources" option detailed below. :param str version: Install a specific version of a package. This option is ignored if "sources" is used. Currently, this option is supported for the following pkg providers: :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`pkgin <salt.modules.pkgin>`, :mod:`win_pkg <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`. The version number includes the release designation where applicable, to allow Salt to target a specific release of a given version. When in doubt, using the ``pkg.latest_version`` function for an uninstalled package will tell you the version available. .. code-block:: bash # salt myminion pkg.latest_version vim-enhanced myminion: 2:7.4.160-1.el7 .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the ``pkg.latest_version`` output above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. 
Also, while this function is not yet implemented for all pkg frontends, :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` will show all versions available in the various repositories for a given package, irrespective of whether or not it is installed. .. code-block:: bash # salt myminion pkg.list_repo_pkgs bash myminion: ---------- bash: - 4.2.46-21.el7_3 - 4.2.46-20.el7_2 This function was first added for :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` in 2014.1.0, and was expanded to :py:func:`Debian/Ubuntu <salt.modules.aptpkg.list_repo_pkgs>` and :py:func:`Arch Linux <salt.modules.pacman.list_repo_pkgs>`-based distros in the 2017.7.0 release. The version strings returned by either of these functions can be used as version specifiers in pkg states. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.installed: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 If the version given is the string ``latest``, the latest available package version will be installed à la ``pkg.latest``. **WILDCARD VERSIONS** As of the 2017.7.0 release, this state now supports wildcards in package versions for SUSE SLES/Leap/Tumbleweed, Debian/Ubuntu, RHEL/CentOS, Arch Linux, and their derivatives. Using wildcards can be useful for packages where the release name is built into the version in some way, such as for RHEL/CentOS which typically has version numbers like ``1.2.34-5.el7``. An example of the usage for this would be: .. code-block:: yaml mypkg: pkg.installed: - version: '1.2.34*' Keep in mind that using wildcard versions will result in a slower state run since Salt must gather the available versions of the specified packages and figure out which of them match the specified wildcard expression. :param bool refresh: This parameter controls whether or not the package repo database is updated prior to installing the requested package(s). 
If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before installing. If ``False``, the package database will *not* be refreshed before installing. If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - cache_valid_time: 300 - allow_updates: True - hold: False In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param str fromrepo: Specify a repository from which to install .. note:: Distros which use APT (Debian, Ubuntu, etc.) do not have a concept of repositories, in the same way as YUM-based distros do. When a source is added, it is assigned to a given release. Consider the following source configuration: .. code-block:: text deb http://ppa.launchpad.net/saltstack/salt/ubuntu precise main The packages provided by this source would be made available via the ``precise`` release, therefore ``fromrepo`` would need to be set to ``precise`` for Salt to install the package from this source. 
Having multiple sources in the same release may result in the default install candidate being newer than what is desired. If this is the case, the desired version must be specified using the ``version`` parameter. If the ``pkgs`` parameter is being used to install multiple packages in the same state, then instead of using ``version``, use the method of version specification described in the **Multiple Package Installation Options** section below. Running the shell command ``apt-cache policy pkgname`` on a minion can help elucidate the APT configuration and aid in properly configuring states: .. code-block:: bash root@saltmaster:~# salt ubuntu01 cmd.run 'apt-cache policy ffmpeg' ubuntu01: ffmpeg: Installed: (none) Candidate: 7:0.10.11-1~precise1 Version table: 7:0.10.11-1~precise1 0 500 http://ppa.launchpad.net/jon-severinsson/ffmpeg/ubuntu/ precise/main amd64 Packages 4:0.8.10-0ubuntu0.12.04.1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise-updates/main amd64 Packages 500 http://security.ubuntu.com/ubuntu/ precise-security/main amd64 Packages 4:0.8.1-0ubuntu1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise/main amd64 Packages The release is located directly after the source's URL. The actual release name is the part before the slash, so to install version **4:0.8.10-0ubuntu0.12.04.1** either ``precise-updates`` or ``precise-security`` could be used for the ``fromrepo`` value. :param bool skip_verify: Skip the GPG verification check for the package to be installed :param bool skip_suggestions: Force strict package naming. Disables lookup of package alternatives. .. versionadded:: 2014.1.1 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 :param bool allow_updates: Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). 
This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - allow_updates: True - hold: False :param bool pkg_verify: .. versionadded:: 2014.7.0 For requested packages that are already installed and would not be targeted for upgrade or downgrade, use pkg.verify to determine if any of the files installed by the package have been altered. If files have been altered, the reinstall option of pkg.install is used to force a reinstall. Types to ignore can be passed to pkg.verify. Additionally, ``verify_options`` can be used to modify further the behavior of pkg.verify. See examples below. Currently, this option is supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>`. Examples: .. code-block:: yaml httpd: pkg.installed: - version: 2.2.15-30.el6.centos - pkg_verify: True .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc - verify_options: - nodeps - nofiledigest :param list ignore_types: List of types to ignore when verifying the package .. versionadded:: 2014.7.0 :param list verify_options: List of additional options to pass when verifying the package. These options will be added to the ``rpm -V`` command, prepended with ``--`` (for example, when ``nodeps`` is passed in this option, ``rpm -V`` will be run with ``--nodeps``). .. versionadded:: 2016.11.0 :param bool normalize: Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml gpfs.gplbin-2.6.32-279.31.1.el6.x86_64: pkg.installed: - normalize: False :param bool ignore_epoch: When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.installed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the package would be installed, but the state would report as failed because the actual installed version would be ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 | **MULTIPLE PACKAGE INSTALLATION OPTIONS: (not supported in pkgng)** :param list pkgs: A list of packages to install from a software repository. All packages listed under ``pkgs`` will be installed via a single command. .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar - baz - hold: True ``NOTE:`` For :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`winrepo <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`, version numbers can be specified in the ``pkgs`` argument. For example: .. 
code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz Additionally, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`zypper <salt.modules.zypper>`, :mod:`yum/dnf <salt.modules.yumpkg>`, and :mod:`apt <salt.modules.aptpkg>` support the ``<``, ``<=``, ``>=``, and ``>`` operators for more control over what versions will be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: '>=1.2.3-4' - baz ``NOTE:`` When using comparison operators, the expression must be enclosed in quotes to avoid a YAML render error. With :mod:`ebuild <salt.modules.ebuild>` is also possible to specify a use flag list and/or if the given packages should be in package.accept_keywords file and/or the overlay from which you want the package to be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo: '~' - bar: '~>=1.2:slot::overlay[use,-otheruse]' - baz :param list sources: A list of packages to install, along with the source URI or local path from which to install each package. In the example below, ``foo``, ``bar``, ``baz``, etc. refer to the name of the package, as it would appear in the output of the ``pkg.version`` or ``pkg.list_pkgs`` salt CLI commands. .. code-block:: yaml mypkgs: pkg.installed: - sources: - foo: salt://rpms/foo.rpm - bar: http://somesite.org/bar.rpm - baz: ftp://someothersite.org/baz.rpm - qux: /minion/path/to/qux.rpm **PLATFORM-SPECIFIC ARGUMENTS** These are specific to each OS. If it does not apply to the execution module for your OS, it is ignored. :param bool hold: Force the package to be held at the current installed version. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2014.7.0 :param bool update_holds: If ``True``, and this function would update the package version, any packages which are being held will be temporarily unheld so that they can be updated. 
Otherwise, if this function attempts to update a held package, the held package(s) will be skipped and the state will fail. By default, this parameter is set to ``False``. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2016.11.0 :param list names: A list of packages to install from a software repository. Each package will be installed individually by the package manager. .. warning:: Unlike ``pkgs``, the ``names`` parameter cannot specify a version. In addition, it makes a separate call to the package management frontend to install each package, whereas ``pkgs`` makes just a single call. It is therefore recommended to use ``pkgs`` instead of ``names`` to install multiple packages, both for the additional features and the performance improvement that it brings. :param bool install_recommends: Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - install_recommends: False :param bool only_upgrade: Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. :param bool report_reboot_exit_codes: If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. 
code-block:: yaml

        ms vcpp installed:
          pkg.installed:
            - name: ms-vcpp
            - version: 10.0.40219
            - report_reboot_exit_codes: False

    :param str bypass_file:
        If you wish to bypass the full package validation process, you can
        specify a file related to the installed package as a way to validate
        that the package has already been installed. A good example would be a
        config file that is deployed with the package. Another bypass_file
        could be ``/run/salt-minion.pid``.

        .. code-block:: yaml

            install_ntp:
              pkg.installed:
                - name: ntp
                - bypass_file: /etc/ntp.conf

        The use case for this feature is when running salt at significant
        scale. Each state that has a requisite for a ``pkg.installed`` will
        have salt querying the package manager of the system. Compared to
        simple diff checks, querying the package manager is a lengthy process.
        This feature is an attempt to reduce the run time of states. If only a
        config change is being made but you wish to keep all of the
        self-resolving requisites, this bypasses the lengthy cost of the
        package manager. The assumption is that if this file is present, the
        package should already be installed.

    :param str bypass_file_contains:
        This option can only be used in conjunction with the ``bypass_file``
        option. It provides a second layer of validation before bypassing
        the ``pkg.installed`` process.

        .. code-block:: yaml

            install_ntp:
              pkg.installed:
                - name: ntp
                - bypass_file: /etc/ntp.conf
                - bypass_file_contains: version-20181218

        This will have salt check to see if the file contains the specified
        string. If the value is found, the ``pkg.installed`` process will be
        bypassed under the assumption that two pieces of validation have
        passed and the package is already installed.

        .. warning::
            Do not try to use ``{{ salt['pkg.version']('ntp') }}`` in a jinja
            template as part of your bypass_file_contains match. This will
            trigger a ``pkg.version`` lookup with the package manager and
            negate any time saved by trying to use the bypass feature.
:return: A dictionary containing the state of the software installation :rtype dict: .. note:: The ``pkg.installed`` state supports the usage of ``reload_modules``. This functionality allows you to force Salt to reload all modules. In many cases, Salt is clever enough to transparently reload the modules. For example, if you install a package, Salt reloads modules because some other module or state might require the package which was installed. However, there are some edge cases where this may not be the case, which is what ``reload_modules`` is meant to resolve. You should only use ``reload_modules`` if your ``pkg.installed`` does some sort of installation where if you do not reload the modules future items in your state which rely on the software being installed will fail. Please see the :ref:`Reloading Modules <reloading-modules>` documentation for more information. ''' if not pkgs and isinstance(pkgs, list): return {'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided'} # If just a name (and optionally a version) is passed, just pack them into # the pkgs argument. 
if name and not any((pkgs, sources)): if version: pkgs = [{name: version}] version = None else: pkgs = [name] kwargs['saltenv'] = __env__ refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if bypass_file is not None and bypass_file_contains is not None: if os.path.isfile(bypass_file): with salt.utils.fopen(bypass_file) as bypass_file_open: if bypass_file_contains in bypass_file_open.read(): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as {} was present in {}'.format(bypass_file_contains, bypass_file)} if bypass_file is not None and bypass_file_contains is None: if os.path.isfile(bypass_file): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as bypass_file {} was present'.format(bypass_file)} # check if capabilities should be checked and modify the requested packages # accordingly. if pkgs: pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) if not isinstance(pkg_verify, list): pkg_verify = pkg_verify is True if (pkg_verify or isinstance(pkg_verify, list)) \ and 'pkg.verify' not in __salt__: return {'name': name, 'changes': {}, 'result': False, 'comment': 'pkg.verify not implemented'} if not isinstance(version, six.string_types) and version is not None: version = six.text_type(version) kwargs['allow_updates'] = allow_updates result = _find_install_targets(name, version, pkgs, sources, fromrepo=fromrepo, skip_suggestions=skip_suggestions, pkg_verify=pkg_verify, normalize=normalize, ignore_epoch=ignore_epoch, reinstall=reinstall, refresh=refresh, **kwargs) try: (desired, targets, to_unpurge, to_reinstall, altered_files, warnings, was_refreshed) = result if was_refreshed: refresh = False except ValueError: # _find_install_targets() found no targets or encountered an error # check that the hold function is available if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( 
name=name, pkgs=pkgs, sources=sources ) except (CommandExecutionError, SaltInvocationError) as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': six.text_type(exc)} if 'result' in hold_ret and not hold_ret['result']: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] for i in modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] result['changes'][i['name']] = i['changes'] for i in not_modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] for i in failed_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] return result if to_unpurge and 'lowpkg.unpurge' not in __salt__: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'lowpkg.unpurge not implemented'} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret # Remove any targets not returned by _find_install_targets if pkgs: pkgs = [dict([(x, y)]) for x, y in six.iteritems(targets)] pkgs.extend([dict([(x, y)]) for x, y in six.iteritems(to_reinstall)]) elif sources: oldsources = sources sources = [x for x in oldsources if next(iter(list(x.keys()))) in targets] sources.extend([x for x in oldsources if next(iter(list(x.keys()))) in to_reinstall]) comment = [] if __opts__['test']: if targets: if sources: summary = ', '.join(targets) else: summary = ', '.join([_get_desired_pkg(x, targets) for x in targets]) comment.append('The following packages would be ' 'installed/updated: {0}'.format(summary)) if to_unpurge: comment.append( 'The following packages would have their selection 
status ' 'changed from \'purge\' to \'install\': {0}' .format(', '.join(to_unpurge)) ) if to_reinstall: # Add a comment for each package in to_reinstall with its # pkg.verify output if reinstall: reinstall_targets = [] for reinstall_pkg in to_reinstall: if sources: reinstall_targets.append(reinstall_pkg) else: reinstall_targets.append( _get_desired_pkg(reinstall_pkg, to_reinstall) ) msg = 'The following packages would be reinstalled: ' msg += ', '.join(reinstall_targets) comment.append(msg) else: for reinstall_pkg in to_reinstall: if sources: pkgstr = reinstall_pkg else: pkgstr = _get_desired_pkg(reinstall_pkg, to_reinstall) comment.append( 'Package \'{0}\' would be reinstalled because the ' 'following files have been altered:'.format(pkgstr) ) comment.append( _nested_output(altered_files[reinstall_pkg]) ) ret = {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret changes = {'installed': {}} modified_hold = None not_modified_hold = None failed_hold = None if targets or to_reinstall: try: pkg_ret = __salt__['pkg.install'](name=None, refresh=refresh, version=version, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=pkgs, sources=sources, reinstall=bool(to_reinstall), normalize=normalize, update_holds=update_holds, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while installing ' 'package(s): {0}'.format(exc)) if warnings: ret.setdefault('warnings', []).extend(warnings) return ret if refresh: refresh = False if isinstance(pkg_ret, dict): changes['installed'].update(pkg_ret) elif isinstance(pkg_ret, six.string_types): comment.append(pkg_ret) # Code below will be looking for a dictionary. If this is a string # it means that there was an exception raised and that no packages # changed, so now that we have added this error to the comments we # set this to an empty dictionary so that the code below which # checks reinstall targets works. pkg_ret = {} if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( name=name, pkgs=desired ) except (CommandExecutionError, SaltInvocationError) as exc: comment.append(six.text_type(exc)) ret = {'name': name, 'changes': changes, 'result': False, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: if 'result' in hold_ret and not hold_ret['result']: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] if to_unpurge: changes['purge_desired'] = __salt__['lowpkg.unpurge'](*to_unpurge) # Analyze pkg.install results for packages in targets if sources: modified = [x for x in changes['installed'] if x in targets] not_modified = [x for x in desired if x not in targets 
and x not in to_reinstall] failed = [x for x in targets if x not in modified] else: if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True new_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__: new_caps = __salt__['pkg.list_provides'](**kwargs) else: new_caps = {} ok, failed = _verify_install(desired, new_pkgs, ignore_epoch=ignore_epoch, new_caps=new_caps) modified = [x for x in ok if x in targets] not_modified = [x for x in ok if x not in targets and x not in to_reinstall] failed = [x for x in failed if x in targets] # If there was nothing unpurged, just set the changes dict to the contents # of changes['installed']. if not changes.get('purge_desired'): changes = changes['installed'] if modified: if sources: summary = ', '.join(modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in modified]) if len(summary) < 20: comment.append('The following packages were installed/updated: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} installed/updated.'.format( len(modified), 's' if len(modified) > 1 else '', 'were' if len(modified) > 1 else 'was' ) ) if modified_hold: for i in modified_hold: change_name = i['name'] if change_name in changes: comment.append(i['comment']) if changes[change_name]['new']: changes[change_name]['new'] += '\n' changes[change_name]['new'] += '{0}'.format(i['changes']['new']) if changes[change_name]['old']: changes[change_name]['old'] += '\n' changes[change_name]['old'] += '{0}'.format(i['changes']['old']) else: comment.append(i['comment']) changes[change_name] = {} changes[change_name]['new'] = '{0}'.format(i['changes']['new']) # Any requested packages that were not targeted for install or reinstall if not_modified: if sources: summary = ', '.join(not_modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in not_modified]) if len(not_modified) <= 20: comment.append('The following 
packages were already installed: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} already installed'.format( len(not_modified), 's' if len(not_modified) > 1 else '', 'were' if len(not_modified) > 1 else 'was' ) ) if not_modified_hold: for i in not_modified_hold: comment.append(i['comment']) result = True if failed: if sources: summary = ', '.join(failed) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in failed]) comment.insert(0, 'The following packages failed to ' 'install/update: {0}'.format(summary)) result = False if failed_hold: for i in failed_hold: comment.append(i['comment']) result = False # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] # Rerun pkg.verify for packages in to_reinstall to determine failed modified = [] failed = [] for reinstall_pkg in to_reinstall: if reinstall: if reinstall_pkg in pkg_ret: modified.append(reinstall_pkg) else: failed.append(reinstall_pkg) elif pkg_verify: # No need to wrap this in a try/except because we would already # have caught invalid arguments earlier. 
            verify_result = __salt__['pkg.verify'](reinstall_pkg,
                                                   ignore_types=ignore_types,
                                                   verify_options=verify_options,
                                                   **kwargs)
            if verify_result:
                # pkg.verify still reports altered files, so the reinstall did
                # not remediate them; record the failure and the details.
                failed.append(reinstall_pkg)
                altered_files[reinstall_pkg] = verify_result
            else:
                modified.append(reinstall_pkg)

    if modified:
        # Add a comment for each package in modified with its pkg.verify output
        for modified_pkg in modified:
            if sources:
                pkgstr = modified_pkg
            else:
                pkgstr = _get_desired_pkg(modified_pkg, desired)
            msg = 'Package {0} was reinstalled.'.format(pkgstr)
            if modified_pkg in altered_files:
                msg += ' The following files were remediated:'
                comment.append(msg)
                comment.append(_nested_output(altered_files[modified_pkg]))
            else:
                comment.append(msg)

    if failed:
        # Add a comment for each package in failed with its pkg.verify output
        for failed_pkg in failed:
            if sources:
                pkgstr = failed_pkg
            else:
                pkgstr = _get_desired_pkg(failed_pkg, desired)
            msg = ('Reinstall was not successful for package {0}.'
                   .format(pkgstr))
            if failed_pkg in altered_files:
                msg += ' The following files could not be remediated:'
                comment.append(msg)
                comment.append(_nested_output(altered_files[failed_pkg]))
            else:
                comment.append(msg)
        result = False

    ret = {'name': name,
           'changes': changes,
           'result': result,
           'comment': '\n'.join(comment)}
    if warnings:
        ret.setdefault('warnings', []).extend(warnings)
    return ret


def downloaded(name,
               version=None,
               pkgs=None,
               fromrepo=None,
               ignore_epoch=None,
               **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that the package is downloaded, and that it is the correct version
    (if specified).

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    :param str name:
        The name of the package to be downloaded. This parameter is ignored if
        "pkgs" is used. Additionally, please note that this option can only be
        used to download packages from a software repository.

    :param str version:
        Download a specific version of a package.

        .. important::
            As of version 2015.8.7, for distros which use yum/dnf, packages
            which have a version with a nonzero epoch (that is, versions which
            start with a number followed by a colon) must have the epoch
            included when specifying the version number. For example:

            .. code-block:: yaml

                vim-enhanced:
                  pkg.downloaded:
                    - version: 2:7.4.160-1.el7

            An **ignore_epoch** argument has been added which causes the epoch
            to be disregarded when the state checks to see if the desired
            version was installed.

            You can install a specific version when using the ``pkgs`` argument
            by including the version after the package:

            .. code-block:: yaml

                common_packages:
                  pkg.downloaded:
                    - pkgs:
                      - unzip
                      - dos2unix
                      - salt-minion: 2015.8.5-1.el6

    :param bool resolve_capabilities:
        Turn on resolving capabilities. This allows one to name "provides" or
        alias names for packages.

        .. versionadded:: 2018.3.0

    CLI Example:

    .. code-block:: yaml

        zsh:
          pkg.downloaded:
            - version: 5.0.5-4.63
            - fromrepo: "myrepository"
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # pkg.list_downloaded is needed below to confirm the downloads, so bail
    # out early on platforms whose pkg module does not implement it.
    if 'pkg.list_downloaded' not in __salt__:
        ret['result'] = False
        ret['comment'] = 'The pkg.downloaded state is not available on ' \
                         'this platform'
        return ret

    # An explicitly empty pkgs list means there is nothing to do.
    if not pkgs and isinstance(pkgs, list):
        ret['result'] = True
        ret['comment'] = 'No packages to download provided'
        return ret

    # If just a name (and optionally a version) is passed, just pack them into
    # the pkgs argument.
    if name and not pkgs:
        if version:
            pkgs = [{name: version}]
            version = None
        else:
            pkgs = [name]

    # It doesn't make sense here to receive 'downloadonly' as kwargs
    # as we're explicitly passing 'downloadonly=True' to execution module.
    if 'downloadonly' in kwargs:
        del kwargs['downloadonly']

    pkgs, _refresh = _resolve_capabilities(pkgs, **kwargs)

    # Only target packages which have not yet been downloaded
    targets = _find_download_targets(name,
                                     version,
                                     pkgs,
                                     fromrepo=fromrepo,
                                     ignore_epoch=ignore_epoch,
                                     **kwargs)
    if isinstance(targets, dict) and 'result' in targets:
        # _find_download_targets returned a complete state result
        return targets
    elif not isinstance(targets, dict):
        ret['result'] = False
        ret['comment'] = 'An error was encountered while checking targets: ' \
                         '{0}'.format(targets)
        return ret

    if __opts__['test']:
        summary = ', '.join(targets)
        ret['comment'] = 'The following packages would be ' \
                         'downloaded: {0}'.format(summary)
        return ret

    try:
        pkg_ret = __salt__['pkg.install'](name=name,
                                          pkgs=pkgs,
                                          version=version,
                                          downloadonly=True,
                                          fromrepo=fromrepo,
                                          ignore_epoch=ignore_epoch,
                                          **kwargs)
        ret['result'] = True
        ret['changes'].update(pkg_ret)
    except CommandExecutionError as exc:
        ret = {'name': name, 'result': False}
        if exc.info:
            # Get information for state return from the exception.
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = 'An error was encountered while downloading ' \
                             'package(s): {0}'.format(exc)
        return ret

    # Confirm that every targeted package now appears in the download cache
    new_pkgs = __salt__['pkg.list_downloaded'](**kwargs)
    ok, failed = _verify_install(targets, new_pkgs, ignore_epoch=ignore_epoch)

    if failed:
        summary = ', '.join([_get_desired_pkg(x, targets)
                             for x in failed])
        ret['result'] = False
        ret['comment'] = 'The following packages failed to ' \
                         'download: {0}'.format(summary)

    if not ret['changes'] and not ret['comment']:
        ret['result'] = True
        ret['comment'] = 'Packages are already downloaded: ' \
                         '{0}'.format(', '.join(targets))

    return ret


def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that packages related to certain advisory ids are installed.
    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    CLI Example:

    .. code-block:: yaml

        issue-foo-fixed:
          pkg.patch_installed:
            - advisory_ids:
              - SUSE-SLE-SERVER-12-SP2-2017-185
              - SUSE-SLE-SERVER-12-SP2-2017-150
              - SUSE-SLE-SERVER-12-SP2-2017-120
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # pkg.list_patches is needed to resolve advisory targets, so fail early
    # on platforms whose pkg module does not implement it.
    if 'pkg.list_patches' not in __salt__:
        ret['result'] = False
        ret['comment'] = 'The pkg.patch_installed state is not available on ' \
                         'this platform'
        return ret

    # An explicitly empty advisory id list means there is nothing to do.
    if not advisory_ids and isinstance(advisory_ids, list):
        ret['result'] = True
        ret['comment'] = 'No advisory ids provided'
        return ret

    # Only target advisories whose packages are not yet installed/downloaded
    targets = _find_advisory_targets(name, advisory_ids, **kwargs)
    if isinstance(targets, dict) and 'result' in targets:
        # _find_advisory_targets returned a complete state result
        return targets
    elif not isinstance(targets, list):
        ret['result'] = False
        ret['comment'] = 'An error was encountered while checking targets: ' \
                         '{0}'.format(targets)
        return ret

    if __opts__['test']:
        summary = ', '.join(targets)
        ret['comment'] = 'The following advisory patches would be ' \
                         'downloaded: {0}'.format(summary)
        return ret

    try:
        pkg_ret = __salt__['pkg.install'](name=name,
                                          advisory_ids=advisory_ids,
                                          downloadonly=downloadonly,
                                          **kwargs)
        ret['result'] = True
        ret['changes'].update(pkg_ret)
    except CommandExecutionError as exc:
        ret = {'name': name, 'result': False}
        if exc.info:
            # Get information for state return from the exception.
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = ('An error was encountered while downloading '
                              'package(s): {0}'.format(exc))
        return ret

    if not ret['changes'] and not ret['comment']:
        # No changes and no error comment: the advisory patches were already
        # applied (or already downloaded, when downloadonly is set).
        status = 'downloaded' if downloadonly else 'installed'
        ret['result'] = True
        ret['comment'] = ('Advisory patch is not needed or related packages '
                          'are already {0}'.format(status))

    return ret


def latest(
        name,
        refresh=None,
        fromrepo=None,
        skip_verify=False,
        pkgs=None,
        watch_flags=True,
        **kwargs):
    '''
    Ensure that the named package is installed and the latest available
    package. If the package can be updated, this state function will update
    the package. Generally it is better for the
    :mod:`installed <salt.states.pkg.installed>` function to be used, as
    :mod:`latest <salt.states.pkg.latest>` will update the package whenever a
    new package is available.

    name
        The name of the package to maintain at the latest available version.
        This parameter is ignored if "pkgs" is used.

    fromrepo
        Specify a repository from which to install

    skip_verify
        Skip the GPG verification check for the package to be installed

    refresh
        This parameter controls whether or not the package repo database is
        updated prior to checking for the latest available version of the
        requested packages.

        If ``True``, the package database will be refreshed (``apt-get
        update`` or equivalent, depending on platform) before checking for
        the latest available version of the requested packages.

        If ``False``, the package database will *not* be refreshed before
        checking.

        If unset, then Salt treats package database refreshes differently
        depending on whether or not a ``pkg`` state has been executed already
        during the current Salt run. Once a refresh has been performed in a
        ``pkg`` state, for the remainder of that Salt run no other refreshes
        will be performed for ``pkg`` states which do not explicitly set
        ``refresh`` to ``True``.
This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.latest: - refresh: True - cache_valid_time: 300 In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 Multiple Package Installation Options: (Not yet supported for: FreeBSD, OpenBSD, MacOS, and Solaris pkgutil) pkgs A list of packages to maintain at the latest available version. .. code-block:: yaml mypkgs: pkg.latest: - pkgs: - foo - bar - baz install_recommends Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - install_recommends: False only_upgrade Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. report_reboot_exit_codes If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. 
The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. code-block:: yaml ms vcpp installed: pkg.latest: - name: ms-vcpp - report_reboot_exit_codes: False ''' refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if kwargs.get('sources'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'The "sources" parameter is not supported.'} elif pkgs: desired_pkgs = list(_repack_pkgs(pkgs).keys()) if not desired_pkgs: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted "pkgs" parameter. See ' 'minion log.'} else: if not pkgs and isinstance(pkgs, list): return { 'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided' } else: desired_pkgs = [name] kwargs['saltenv'] = __env__ # check if capabilities should be checked and modify the requested packages # accordingly. desired_pkgs, refresh = _resolve_capabilities(desired_pkgs, refresh=refresh, **kwargs) try: avail = __salt__['pkg.latest_version'](*desired_pkgs, fromrepo=fromrepo, refresh=refresh, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking the ' 'newest available version of package(s): {0}' .format(exc)} try: cur = __salt__['pkg.version'](*desired_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': exc.strerror} # Repack the cur/avail data if only a single package is being checked if isinstance(cur, six.string_types): cur = {desired_pkgs[0]: cur} if isinstance(avail, six.string_types): avail = {desired_pkgs[0]: avail} targets = {} problems = [] for pkg in desired_pkgs: if not avail.get(pkg): # Package either a) is up-to-date, or b) does not exist if not cur.get(pkg): # Package does not exist msg = 'No information found for \'{0}\'.'.format(pkg) log.error(msg) 
problems.append(msg) elif watch_flags \ and __grains__.get('os_family') == 'Gentoo' \ and __salt__['portage_config.is_changed_uses'](pkg): # Package is up-to-date, but Gentoo USE flags are changing so # we need to add it to the targets targets[pkg] = cur[pkg] else: # Package either a) is not installed, or b) is installed and has an # upgrade available targets[pkg] = avail[pkg] if problems: return { 'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems) } if targets: # Find up-to-date packages if not pkgs: # There couldn't have been any up-to-date packages if this state # only targeted a single package and is being allowed to proceed to # the install step. up_to_date = [] else: up_to_date = [x for x in pkgs if x not in targets] if __opts__['test']: comments = [] comments.append( 'The following packages would be installed/upgraded: ' + ', '.join(sorted(targets)) ) if up_to_date: up_to_date_count = len(up_to_date) if up_to_date_count <= 10: comments.append( 'The following packages are already up-to-date: ' + ', '.join( ['{0} ({1})'.format(x, cur[x]) for x in sorted(up_to_date)] ) ) else: comments.append( '{0} packages are already up-to-date' .format(up_to_date_count) ) return {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comments)} if salt.utils.platform.is_windows(): # pkg.install execution module on windows ensures the software # package is installed when no version is specified, it does not # upgrade the software to the latest. This is per the design. # Build updated list of pkgs *with verion number*, exclude # non-targeted ones targeted_pkgs = [{x: targets[x]} for x in targets] else: # Build updated list of pkgs to exclude non-targeted ones targeted_pkgs = list(targets) # No need to refresh, if a refresh was necessary it would have been # performed above when pkg.latest_version was run. 
try: changes = __salt__['pkg.install'](name=None, refresh=False, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=targeted_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while installing ' 'package(s): {0}'.format(exc)} if changes: # Find failed and successful updates failed = [x for x in targets if not changes.get(x) or changes[x].get('new') != targets[x] and targets[x] != 'latest'] successful = [x for x in targets if x not in failed] comments = [] if failed: msg = 'The following packages failed to update: ' \ '{0}'.format(', '.join(sorted(failed))) comments.append(msg) if successful: msg = 'The following packages were successfully ' \ 'installed/upgraded: ' \ '{0}'.format(', '.join(sorted(successful))) comments.append(msg) if up_to_date: if len(up_to_date) <= 10: msg = 'The following packages were already up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: msg = '{0} packages were already up-to-date '.format( len(up_to_date)) comments.append(msg) return {'name': name, 'changes': changes, 'result': False if failed else True, 'comment': ' '.join(comments)} else: if len(targets) > 10: comment = ('{0} targeted packages failed to update. ' 'See debug log for details.'.format(len(targets))) elif len(targets) > 1: comment = ('The following targeted packages failed to update. ' 'See debug log for details: ({0}).' 
.format(', '.join(sorted(targets)))) else: comment = 'Package {0} failed to ' \ 'update.'.format(next(iter(list(targets.keys())))) if up_to_date: if len(up_to_date) <= 10: comment += ' The following packages were already ' \ 'up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: comment += '{0} packages were already ' \ 'up-to-date'.format(len(up_to_date)) return {'name': name, 'changes': changes, 'result': False, 'comment': comment} else: if len(desired_pkgs) > 10: comment = 'All {0} packages are up-to-date.'.format( len(desired_pkgs)) elif len(desired_pkgs) > 1: comment = 'All packages are up-to-date ' \ '({0}).'.format(', '.join(sorted(desired_pkgs))) else: comment = 'Package {0} is already ' \ 'up-to-date'.format(desired_pkgs[0]) return {'name': name, 'changes': {}, 'result': True, 'comment': comment} def _uninstall( action='remove', name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Common function for package removal ''' if action not in ('remove', 'purge'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalid action \'{0}\'. 
' 'This is probably a bug.'.format(action)} try: pkg_params = __salt__['pkg_resource.parse_targets']( name, pkgs, normalize=normalize)[0] except MinionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while parsing targets: ' '{0}'.format(exc)} targets = _find_remove_targets(name, version, pkgs, normalize, ignore_epoch=ignore_epoch, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, list): return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking targets: ' '{0}'.format(targets)} if action == 'purge': old_removed = __salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) targets.extend([x for x in pkg_params if x in old_removed]) targets.sort() if not targets: return {'name': name, 'changes': {}, 'result': True, 'comment': 'None of the targeted packages are installed' '{0}'.format(' or partially installed' if action == 'purge' else '')} if __opts__['test']: return {'name': name, 'changes': {}, 'result': None, 'comment': 'The following packages will be {0}d: ' '{1}.'.format(action, ', '.join(targets))} changes = __salt__['pkg.{0}'.format(action)](name, pkgs=pkgs, version=version, **kwargs) new = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) failed = [] for x in pkg_params: if __grains__['os_family'] in ['Suse', 'RedHat']: # Check if the package version set to be removed is actually removed: if x in new and not pkg_params[x]: failed.append(x) elif x in new and pkg_params[x] in new[x]: failed.append(x + "-" + pkg_params[x]) elif x in new: failed.append(x) if action == 'purge': new_removed = __salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) failed.extend([x for x in pkg_params if x in new_removed]) failed.sort() if failed: return {'name': name, 'changes': changes, 'result': False, 'comment': 'The following packages failed to {0}: ' '{1}.'.format(action, ', 
'.join(failed))} comments = [] not_installed = sorted([x for x in pkg_params if x not in targets]) if not_installed: comments.append('The following packages were not installed: ' '{0}'.format(', '.join(not_installed))) comments.append('The following packages were {0}d: ' '{1}.'.format(action, ', '.join(targets))) else: comments.append('All targeted packages were {0}d.'.format(action)) return {'name': name, 'changes': changes, 'result': True, 'comment': ' '.join(comments)} def removed(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.remove`` if necessary to remove the package. name The name of the package to be removed. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.removed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to remove. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='remove', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while removing ' 'package(s): {0}'.format(exc)) return ret def purged(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.purge`` if necessary to purge the package. 
All configuration files are also removed. name The name of the package to be purged. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. 
code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.purged: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to purge. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='purge', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while purging ' 'package(s): {0}'.format(exc)) return ret def uptodate(name, refresh=False, pkgs=None, **kwargs): ''' .. versionadded:: 2014.7.0 .. versionchanged:: 2018.3.0 Added support for the ``pkgin`` provider. Verify that the system is completely up to date. name The name has no functional value and is only used as a tracking reference refresh refresh the package database before checking for new upgrades pkgs list of packages to upgrade :param str cache_valid_time: This parameter sets the value in seconds after which cache marked as invalid, and cache update is necessary. This overwrite ``refresh`` parameter default behavior. In this case cache_valid_time is set, refresh will not take place for amount in seconds since last ``apt-get update`` executed on the system. .. 
note:: This parameter available only on Debian based distributions, and have no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 kwargs Any keyword arguments to pass through to ``pkg.upgrade``. .. versionadded:: 2015.5.0 ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'Failed to update'} if 'pkg.list_upgrades' not in __salt__: ret['comment'] = 'State pkg.uptodate is not available' return ret # emerge --update doesn't appear to support repo notation if 'fromrepo' in kwargs and __grains__['os_family'] == 'Gentoo': ret['comment'] = '\'fromrepo\' argument not supported on this platform' return ret if isinstance(refresh, bool): pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) try: packages = __salt__['pkg.list_upgrades'](refresh=refresh, **kwargs) expected = {pkgname: {'new': pkgver, 'old': __salt__['pkg.version'](pkgname, **kwargs)} for pkgname, pkgver in six.iteritems(packages)} if isinstance(pkgs, list): packages = [pkg for pkg in packages if pkg in pkgs] expected = {pkgname: pkgver for pkgname, pkgver in six.iteritems(expected) if pkgname in pkgs} except Exception as exc: ret['comment'] = six.text_type(exc) return ret else: ret['comment'] = 'refresh must be either True or False' return ret if not packages: ret['comment'] = 'System is already up-to-date' ret['result'] = True return ret elif __opts__['test']: ret['comment'] = 'System update will be performed' ret['changes'] = expected ret['result'] = None return ret try: ret['changes'] = __salt__['pkg.upgrade'](refresh=refresh, pkgs=pkgs, **kwargs) except CommandExecutionError as exc: if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while updating ' 'packages: {0}'.format(exc)) return ret # If a package list was provided, ensure those packages were updated missing = [] if isinstance(pkgs, list): missing = [pkg for pkg in six.iterkeys(expected) if pkg not in ret['changes']] if missing: ret['comment'] = 'The following package(s) failed to update: {0}'.format(', '.join(missing)) ret['result'] = False else: ret['comment'] = 'Upgrade ran successfully' ret['result'] = True return ret def group_installed(name, skip=None, include=None, **kwargs): ''' .. versionadded:: 2015.8.0 .. versionchanged:: 2016.11.0 Added support in :mod:`pacman <salt.modules.pacman>` Ensure that an entire package group is installed. This state is currently only supported for the :mod:`yum <salt.modules.yumpkg>` and :mod:`pacman <salt.modules.pacman>` package managers. skip Packages that would normally be installed by the package group ("default" packages), which should not be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - skip: - piranha include Packages which are included in a group, which would not normally be installed by a ``yum groupinstall`` ("optional" packages). Note that this will not enforce group membership; if you include packages which are not members of the specified groups, they will still be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - include: - haproxy .. versionchanged:: 2016.3.0 This option can no longer be passed as a comma-separated list, it must now be passed as a list (as shown in the above example). .. note:: Because this is essentially a wrapper around :py:func:`pkg.install <salt.modules.yumpkg.install>`, any argument which can be passed to pkg.install may also be included here, and it will be passed on to the call to :py:func:`pkg.install <salt.modules.yumpkg.install>`. 
''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} if 'pkg.group_diff' not in __salt__: ret['comment'] = 'pkg.group_install not available for this platform' return ret if skip is None: skip = [] else: if not isinstance(skip, list): ret['comment'] = 'skip must be formatted as a list' return ret for idx, item in enumerate(skip): if not isinstance(item, six.string_types): skip[idx] = six.text_type(item) if include is None: include = [] else: if not isinstance(include, list): ret['comment'] = 'include must be formatted as a list' return ret for idx, item in enumerate(include): if not isinstance(item, six.string_types): include[idx] = six.text_type(item) try: diff = __salt__['pkg.group_diff'](name) except CommandExecutionError as err: ret['comment'] = ('An error was encountered while installing/updating ' 'group \'{0}\': {1}.'.format(name, err)) return ret mandatory = diff['mandatory']['installed'] + \ diff['mandatory']['not installed'] invalid_skip = [x for x in mandatory if x in skip] if invalid_skip: ret['comment'] = ( 'The following mandatory packages cannot be skipped: {0}' .format(', '.join(invalid_skip)) ) return ret targets = diff['mandatory']['not installed'] targets.extend([x for x in diff['default']['not installed'] if x not in skip]) targets.extend(include) if not targets: ret['result'] = True ret['comment'] = 'Group \'{0}\' is already installed'.format(name) return ret partially_installed = diff['mandatory']['installed'] \ or diff['default']['installed'] \ or diff['optional']['installed'] if __opts__['test']: ret['result'] = None if partially_installed: ret['comment'] = ( 'Group \'{0}\' is partially installed and will be updated' .format(name) ) else: ret['comment'] = 'Group \'{0}\' will be installed'.format(name) return ret try: ret['changes'] = __salt__['pkg.install'](pkgs=targets, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while ' 'installing/updating group \'{0}\': {1}' .format(name, exc)) return ret failed = [x for x in targets if x not in __salt__['pkg.list_pkgs'](**kwargs)] if failed: ret['comment'] = ( 'Failed to install the following packages: {0}' .format(', '.join(failed)) ) return ret ret['result'] = True ret['comment'] = 'Group \'{0}\' was {1}'.format( name, 'updated' if partially_installed else 'installed' ) return ret def mod_init(low): ''' Set a flag to tell the install functions to refresh the package database. This ensures that the package database is refreshed only once during a state run significantly improving the speed of package management during a state run. It sets a flag for a number of reasons, primarily due to timeline logic. When originally setting up the mod_init for pkg a number of corner cases arose with different package managers and how they refresh package data. It also runs the "ex_mod_init" from the package manager module that is currently loaded. The "ex_mod_init" is expected to work as a normal "mod_init" function. .. 
seealso:: :py:func:`salt.modules.ebuild.ex_mod_init` ''' ret = True if 'pkg.ex_mod_init' in __salt__: ret = __salt__['pkg.ex_mod_init'](low) if low['fun'] == 'installed' or low['fun'] == 'latest': salt.utils.pkg.write_rtag(__opts__) return ret return False def mod_aggregate(low, chunks, running): ''' The mod_aggregate function which looks up all packages in the available low chunks and merges them into a single pkgs ref in the present low data ''' pkgs = [] pkg_type = None agg_enabled = [ 'installed', 'latest', 'removed', 'purged', ] if low.get('fun') not in agg_enabled: return low for chunk in chunks: tag = __utils__['state.gen_tag'](chunk) if tag in running: # Already ran the pkg state, skip aggregation continue if chunk.get('state') == 'pkg': if '__agg__' in chunk: continue # Check for the same function if chunk.get('fun') != low.get('fun'): continue # Check for the same repo if chunk.get('fromrepo') != low.get('fromrepo'): continue # Check first if 'sources' was passed so we don't aggregate pkgs # and sources together. if 'sources' in chunk: if pkg_type is None: pkg_type = 'sources' if pkg_type == 'sources': pkgs.extend(chunk['sources']) chunk['__agg__'] = True else: if pkg_type is None: pkg_type = 'pkgs' if pkg_type == 'pkgs': # Pull out the pkg names! if 'pkgs' in chunk: pkgs.extend(chunk['pkgs']) chunk['__agg__'] = True elif 'name' in chunk: version = chunk.pop('version', None) if version is not None: pkgs.append({chunk['name']: version}) else: pkgs.append(chunk['name']) chunk['__agg__'] = True if pkg_type is not None and pkgs: if pkg_type in low: low[pkg_type].extend(pkgs) else: low[pkg_type] = pkgs return low def mod_watch(name, **kwargs): ''' Install/reinstall a package based on a watch requisite .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered. 
''' sfun = kwargs.pop('sfun', None) mapfun = {'purged': purged, 'latest': latest, 'removed': removed, 'installed': installed} if sfun in mapfun: return mapfun[sfun](name, **kwargs) return {'name': name, 'changes': {}, 'comment': 'pkg.{0} does not work with the watch requisite'.format(sfun), 'result': False}
saltstack/salt
salt/states/pkg.py
_uninstall
python
def _uninstall( action='remove', name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Common function for package removal ''' if action not in ('remove', 'purge'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalid action \'{0}\'. ' 'This is probably a bug.'.format(action)} try: pkg_params = __salt__['pkg_resource.parse_targets']( name, pkgs, normalize=normalize)[0] except MinionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while parsing targets: ' '{0}'.format(exc)} targets = _find_remove_targets(name, version, pkgs, normalize, ignore_epoch=ignore_epoch, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, list): return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking targets: ' '{0}'.format(targets)} if action == 'purge': old_removed = __salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) targets.extend([x for x in pkg_params if x in old_removed]) targets.sort() if not targets: return {'name': name, 'changes': {}, 'result': True, 'comment': 'None of the targeted packages are installed' '{0}'.format(' or partially installed' if action == 'purge' else '')} if __opts__['test']: return {'name': name, 'changes': {}, 'result': None, 'comment': 'The following packages will be {0}d: ' '{1}.'.format(action, ', '.join(targets))} changes = __salt__['pkg.{0}'.format(action)](name, pkgs=pkgs, version=version, **kwargs) new = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) failed = [] for x in pkg_params: if __grains__['os_family'] in ['Suse', 'RedHat']: # Check if the package version set to be removed is actually removed: if x in new and not pkg_params[x]: failed.append(x) elif x in new and pkg_params[x] in new[x]: failed.append(x + "-" + pkg_params[x]) elif x in new: failed.append(x) if action == 'purge': new_removed = 
__salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) failed.extend([x for x in pkg_params if x in new_removed]) failed.sort() if failed: return {'name': name, 'changes': changes, 'result': False, 'comment': 'The following packages failed to {0}: ' '{1}.'.format(action, ', '.join(failed))} comments = [] not_installed = sorted([x for x in pkg_params if x not in targets]) if not_installed: comments.append('The following packages were not installed: ' '{0}'.format(', '.join(not_installed))) comments.append('The following packages were {0}d: ' '{1}.'.format(action, ', '.join(targets))) else: comments.append('All targeted packages were {0}d.'.format(action)) return {'name': name, 'changes': changes, 'result': True, 'comment': ' '.join(comments)}
Common function for package removal
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pkg.py#L2666-L2767
[ "def _find_remove_targets(name=None,\n version=None,\n pkgs=None,\n normalize=True,\n ignore_epoch=False,\n **kwargs):\n '''\n Inspect the arguments to pkg.removed and discover what packages need to\n be removed. Return a dict of packages to remove.\n '''\n if __grains__['os'] == 'FreeBSD':\n kwargs['with_origin'] = True\n cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)\n if pkgs:\n to_remove = _repack_pkgs(pkgs, normalize=normalize)\n\n if not to_remove:\n # Badly-formatted SLS\n return {'name': name,\n 'changes': {},\n 'result': False,\n 'comment': 'Invalidly formatted pkgs parameter. See '\n 'minion log.'}\n else:\n _normalize_name = \\\n __salt__.get('pkg.normalize_name', lambda pkgname: pkgname)\n to_remove = {_normalize_name(name): version}\n\n version_spec = False\n # Find out which packages will be targeted in the call to pkg.remove\n # Check current versions against specified versions\n targets = []\n problems = []\n for pkgname, pkgver in six.iteritems(to_remove):\n # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names\n origin = bool(re.search('/', pkgname))\n\n if __grains__['os'] == 'FreeBSD' and origin:\n cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == pkgname]\n else:\n cver = cur_pkgs.get(pkgname, [])\n\n # Package not installed, no need to remove\n if not cver:\n continue\n # No version specified and pkg is installed\n elif __salt__['pkg_resource.version_clean'](pkgver) is None:\n targets.append(pkgname)\n continue\n version_spec = True\n try:\n if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch):\n targets.append(pkgname)\n else:\n log.debug(\n 'Current version (%s) did not match desired version '\n 'specification (%s), will not remove', cver, pkgver\n )\n except CommandExecutionError as exc:\n problems.append(exc.strerror)\n continue\n\n if problems:\n return {'name': name,\n 'changes': {},\n 'result': False,\n 'comment': ' '.join(problems)}\n\n if not targets:\n # All 
specified packages are already absent\n msg = 'All specified packages{0} are already absent'.format(\n ' (matching specified versions)' if version_spec else ''\n )\n return {'name': name,\n 'changes': {},\n 'result': True,\n 'comment': msg}\n\n return targets\n" ]
# -*- coding: utf-8 -*- ''' Installation of packages using OS package managers such as yum or apt-get ========================================================================= .. note:: On minions running systemd>=205, as of version 2015.8.12, 2016.3.3, and 2016.11.0, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing the package manager commands spawned by Salt, when Salt updates itself (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.use_scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Salt can manage software packages via the pkg state module, packages can be set up to be installed, latest, removed and purged. Package management declarations are typically rather simple: .. code-block:: yaml vim: pkg.installed A more involved example involves pulling from a custom repository. .. code-block:: yaml base: pkgrepo.managed: - humanname: Logstash PPA - name: ppa:wolfnet/logstash - dist: precise - file: /etc/apt/sources.list.d/logstash.list - keyid: 28B04E4A - keyserver: keyserver.ubuntu.com logstash: pkg.installed: - fromrepo: ppa:wolfnet/logstash Multiple packages can also be installed with the use of the pkgs state module .. code-block:: yaml dotdeb.repo: pkgrepo.managed: - humanname: Dotdeb - name: deb http://packages.dotdeb.org wheezy-php55 all - dist: wheezy-php55 - file: /etc/apt/sources.list.d/dotbeb.list - keyid: 89DF5277 - keyserver: keys.gnupg.net - refresh_db: true php.packages: pkg.installed: - fromrepo: wheezy-php55 - pkgs: - php5-fpm - php5-cli - php5-curl .. 
warning:: Package names are currently case-sensitive. If the minion is using a package manager which is not case-sensitive (such as :mod:`pkgng <salt.modules.pkgng>`), then this state will fail if the proper case is not used. This will be addressed in a future release of Salt. ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import fnmatch import logging import os import re # Import Salt libs import salt.utils.pkg import salt.utils.platform import salt.utils.versions from salt.output import nested from salt.utils.functools import namespaced_function as _namespaced_function from salt.utils.odict import OrderedDict as _OrderedDict from salt.exceptions import ( CommandExecutionError, MinionError, SaltInvocationError ) from salt.modules.pkg_resource import _repack_pkgs # Import 3rd-party libs from salt.ext import six # pylint: disable=invalid-name _repack_pkgs = _namespaced_function(_repack_pkgs, globals()) if salt.utils.platform.is_windows(): # pylint: disable=import-error,no-name-in-module,unused-import from salt.ext.six.moves.urllib.parse import urlparse as _urlparse from salt.exceptions import SaltRenderError import collections import datetime import errno import time from functools import cmp_to_key # pylint: disable=import-error # pylint: enable=unused-import from salt.modules.win_pkg import _get_package_info from salt.modules.win_pkg import get_repo_data from salt.modules.win_pkg import _get_repo_details from salt.modules.win_pkg import _refresh_db_conditional from salt.modules.win_pkg import refresh_db from salt.modules.win_pkg import genrepo from salt.modules.win_pkg import _repo_process_pkg_sls from salt.modules.win_pkg import _get_latest_pkg_version from salt.modules.win_pkg import _reverse_cmp_pkg_versions _get_package_info = _namespaced_function(_get_package_info, globals()) get_repo_data = _namespaced_function(get_repo_data, globals()) _get_repo_details = \ _namespaced_function(_get_repo_details, globals()) 
    # Re-bind the remaining win_pkg helpers so they execute with this state
    # module's globals (__salt__, __grains__, etc.) instead of win_pkg's.
    _refresh_db_conditional = \
        _namespaced_function(_refresh_db_conditional, globals())
    refresh_db = _namespaced_function(refresh_db, globals())
    genrepo = _namespaced_function(genrepo, globals())
    _repo_process_pkg_sls = \
        _namespaced_function(_repo_process_pkg_sls, globals())
    _get_latest_pkg_version = \
        _namespaced_function(_get_latest_pkg_version, globals())
    _reverse_cmp_pkg_versions = \
        _namespaced_function(_reverse_cmp_pkg_versions, globals())
    # The following imports are used by the namespaced win_pkg funcs
    # and need to be included in their globals.
    # pylint: disable=import-error,unused-import
    from salt.utils.versions import LooseVersion
    # pylint: enable=import-error,unused-import
# pylint: enable=invalid-name

# Module-level logger for this state module
log = logging.getLogger(__name__)


def __virtual__():
    '''
    Only make these states available if a pkg provider has been detected or
    assigned for this minion
    '''
    return 'pkg.install' in __salt__


def _get_comparison_spec(pkgver):
    '''
    Return a tuple containing the comparison operator and the version. If no
    comparison operator was passed, the comparison is assumed to be an "equals"
    comparison, and "==" will be the operator returned.
    '''
    oper, verstr = salt.utils.pkg.split_comparison(pkgver.strip())
    # Normalize a bare '=' (or a missing operator) to the canonical '=='
    if oper in ('=', ''):
        oper = '=='
    return oper, verstr


def _parse_version_string(version_conditions_string):
    '''
    Returns a list of two-tuples containing (operator, version).
    '''
    result = []
    version_conditions_string = version_conditions_string.strip()
    if not version_conditions_string:
        # Empty string means "no version constraints at all"
        return result
    # Conditions are comma-delimited, e.g. '>=1.2.3-4, <2.3.4-5'
    for version_condition in version_conditions_string.split(','):
        operator_and_version = _get_comparison_spec(version_condition)
        result.append(operator_and_version)
    return result


def _fulfills_version_string(installed_versions, version_conditions_string, ignore_epoch=False, allow_updates=False):
    '''
    Returns True if any of the installed versions match the specified version
    conditions, otherwise returns False.
    installed_versions
        The installed versions

    version_conditions_string
        The string containing all version conditions. E.G.
        1.2.3-4
        >=1.2.3-4
        >=1.2.3-4, <2.3.4-5
        >=1.2.3-4, <2.3.4-5, !=1.2.4-1

    ignore_epoch : False
        When a package version contains an non-zero epoch (e.g.
        ``1:3.14.159-2.el7``, and a specific version of a package is desired,
        set this option to ``True`` to ignore the epoch when comparing
        versions.

    allow_updates : False
        Allow the package to be updated outside Salt's control (e.g. auto
        updates on Windows). This means a package on the Minion can have a
        newer version than the latest available in the repository without
        enforcing a re-installation of the package.
        (Only applicable if only one strict version condition is specified
        E.G. version: 2.0.6~ubuntu3)
    '''
    version_conditions = _parse_version_string(version_conditions_string)
    for installed_version in installed_versions:
        # Every parsed condition must hold for this one installed version
        fullfills_all = True
        for operator, version_string in version_conditions:
            # With allow_updates and a single '==' condition, accept any
            # version that is equal to or newer than the requested one
            if allow_updates and len(version_conditions) == 1 and operator == '==':
                operator = '>='
            fullfills_all = fullfills_all and _fulfills_version_spec([installed_version], operator, version_string, ignore_epoch=ignore_epoch)
        if fullfills_all:
            return True
    # No single installed version satisfied all conditions
    return False


def _fulfills_version_spec(versions, oper, desired_version,
                           ignore_epoch=False):
    '''
    Returns True if any of the installed versions match the specified version,
    otherwise returns False
    '''
    # pkg.version_cmp may be absent for some providers; compare() handles a
    # None cmp_func by falling back to its default comparison
    cmp_func = __salt__.get('pkg.version_cmp')
    # stripping "with_origin" dict wrapper
    if salt.utils.platform.is_freebsd():
        if isinstance(versions, dict) and 'version' in versions:
            versions = versions['version']
    for ver in versions:
        # '==' additionally supports shell-style wildcards (e.g. '1.2.*')
        # via fnmatch; all other operators use version comparison
        if (oper == '==' and fnmatch.fnmatch(ver, desired_version)) \
                or salt.utils.versions.compare(ver1=ver,
                                               oper=oper,
                                               ver2=desired_version,
                                               cmp_func=cmp_func,
                                               ignore_epoch=ignore_epoch):
            return True
    return False


def _find_unpurge_targets(desired, **kwargs):
    '''
    Find packages which are marked to be purged but can't yet be removed
    because they are dependencies for
    other installed packages. These are the packages which will need to be
    'unpurged' because they are part of pkg.installed states. This really just
    applies to Debian-based Linuxes.
    '''
    # Anything both in ``desired`` and in the purge-desired listing must be
    # "unpurged" before pkg.installed can manage it
    return [
        x for x in desired
        if x in __salt__['pkg.list_pkgs'](purge_desired=True, **kwargs)
    ]


def _find_download_targets(name=None,
                           version=None,
                           pkgs=None,
                           normalize=True,
                           skip_suggestions=False,
                           ignore_epoch=False,
                           **kwargs):
    '''
    Inspect the arguments to pkg.downloaded and discover what packages need to
    be downloaded. Return a dict of packages to download.
    '''
    cur_pkgs = __salt__['pkg.list_downloaded'](**kwargs)
    if pkgs:
        to_download = _repack_pkgs(pkgs, normalize=normalize)

        if not to_download:
            # Badly-formatted SLS
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'Invalidly formatted pkgs parameter. See '
                               'minion log.'}
    else:
        if normalize:
            _normalize_name = \
                __salt__.get('pkg.normalize_name', lambda pkgname: pkgname)
            to_download = {_normalize_name(name): version}
        else:
            to_download = {name: version}

        cver = cur_pkgs.get(name, {})
        if name in to_download:
            # Package already downloaded, no need to download again
            if cver and version in cver:
                return {'name': name,
                        'changes': {},
                        'result': True,
                        'comment': 'Version {0} of package \'{1}\' is already '
                                   'downloaded'.format(version, name)}

            # if cver is not an empty string, the package is already downloaded
            elif cver and version is None:
                # The package is downloaded
                return {'name': name,
                        'changes': {},
                        'result': True,
                        'comment': 'Package {0} is already '
                                   'downloaded'.format(name)}

    version_spec = False
    if not skip_suggestions:
        try:
            problems = _preflight_check(to_download, **kwargs)
        except CommandExecutionError:
            # Pre-flight check is best-effort; ignore failures and proceed
            pass
        else:
            comments = []
            if problems.get('no_suggest'):
                comments.append(
                    'The following package(s) were not found, and no '
                    'possible matches were found in the package db: '
                    '{0}'.format(
                        ', '.join(sorted(problems['no_suggest']))
                    )
                )
            if problems.get('suggest'):
                for pkgname, suggestions in \
                        six.iteritems(problems['suggest']):
                    comments.append(
                        'Package \'{0}\' not found (possible matches: '
                        '{1})'.format(pkgname, ', '.join(suggestions))
                    )
            if comments:
                if len(comments) > 1:
                    comments.append('')
                return {'name': name,
                        'changes': {},
                        'result': False,
                        'comment': '. '.join(comments).rstrip()}

    # Find out which packages will be targeted in the call to pkg.download
    # Check current downloaded versions against specified versions
    targets = {}
    problems = []
    for pkgname, pkgver in six.iteritems(to_download):
        cver = cur_pkgs.get(pkgname, {})
        # Package not yet downloaded, so add to targets
        if not cver:
            targets[pkgname] = pkgver
            continue
        # No version specified but package is already downloaded
        elif cver and not pkgver:
            continue

        version_spec = True
        try:
            # cver maps downloaded version strings to paths; match keys only
            if not _fulfills_version_string(cver.keys(), pkgver,
                                            ignore_epoch=ignore_epoch):
                targets[pkgname] = pkgver
        except CommandExecutionError as exc:
            problems.append(exc.strerror)
            continue

    if problems:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': ' '.join(problems)}

    if not targets:
        # All specified packages are already downloaded
        msg = (
            'All specified packages{0} are already downloaded'
            .format(' (matching specified versions)' if version_spec else '')
        )
        return {'name': name,
                'changes': {},
                'result': True,
                'comment': msg}

    return targets


def _find_advisory_targets(name=None,
                           advisory_ids=None,
                           **kwargs):
    '''
    Inspect the arguments to pkg.patch_installed and discover what advisory
    patches need to be installed. Return a dict of advisory patches to install.
    '''
    cur_patches = __salt__['pkg.list_installed_patches'](**kwargs)
    if advisory_ids:
        to_download = advisory_ids
    else:
        to_download = [name]
        if cur_patches.get(name, {}):
            # Advisory patch already installed, no need to install it again
            return {'name': name,
                    'changes': {},
                    'result': True,
                    'comment': 'Advisory patch {0} is already '
                               'installed'.format(name)}

    # Find out which advisory patches will be targeted in the call to pkg.install
    targets = []
    for patch_name in to_download:
        cver = cur_patches.get(patch_name, {})
        # Advisory patch not yet installed, so add to targets
        # NOTE(review): already-installed patches simply fall off the end of
        # the loop body here; the trailing ``continue`` is effectively a no-op
        if not cver:
            targets.append(patch_name)
            continue

    if not targets:
        # All specified packages are already downloaded
        msg = ('All specified advisory patches are already installed')
        return {'name': name,
                'changes': {},
                'result': True,
                'comment': msg}

    return targets


def _find_remove_targets(name=None,
                         version=None,
                         pkgs=None,
                         normalize=True,
                         ignore_epoch=False,
                         **kwargs):
    '''
    Inspect the arguments to pkg.removed and discover what packages need to
    be removed. Return a dict of packages to remove.
    '''
    # FreeBSD needs package origins in the listing to support
    # 'java/openjdk7'-style names below
    if __grains__['os'] == 'FreeBSD':
        kwargs['with_origin'] = True
    cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
    if pkgs:
        to_remove = _repack_pkgs(pkgs, normalize=normalize)

        if not to_remove:
            # Badly-formatted SLS
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'Invalidly formatted pkgs parameter. See '
                               'minion log.'}
    else:
        _normalize_name = \
            __salt__.get('pkg.normalize_name', lambda pkgname: pkgname)
        to_remove = {_normalize_name(name): version}

    version_spec = False
    # Find out which packages will be targeted in the call to pkg.remove
    # Check current versions against specified versions
    targets = []
    problems = []
    for pkgname, pkgver in six.iteritems(to_remove):
        # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names
        origin = bool(re.search('/', pkgname))

        if __grains__['os'] == 'FreeBSD' and origin:
            cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == pkgname]
        else:
            cver = cur_pkgs.get(pkgname, [])

        # Package not installed, no need to remove
        if not cver:
            continue
        # No version specified and pkg is installed
        elif __salt__['pkg_resource.version_clean'](pkgver) is None:
            targets.append(pkgname)
            continue
        version_spec = True
        try:
            if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch):
                targets.append(pkgname)
            else:
                log.debug(
                    'Current version (%s) did not match desired version '
                    'specification (%s), will not remove', cver, pkgver
                )
        except CommandExecutionError as exc:
            problems.append(exc.strerror)
            continue

    if problems:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': ' '.join(problems)}

    if not targets:
        # All specified packages are already absent
        msg = 'All specified packages{0} are already absent'.format(
            ' (matching specified versions)' if version_spec else ''
        )
        return {'name': name,
                'changes': {},
                'result': True,
                'comment': msg}

    return targets


def _find_install_targets(name=None,
                          version=None,
                          pkgs=None,
                          sources=None,
                          skip_suggestions=False,
                          pkg_verify=False,
                          normalize=True,
                          ignore_epoch=False,
                          reinstall=False,
                          refresh=False,
                          **kwargs):
    '''
    Inspect the arguments to pkg.installed and discover what packages need to
    be installed.
    Return a dict of desired packages
    '''
    was_refreshed = False

    if all((pkgs, sources)):
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'Only one of "pkgs" and "sources" is permitted.'}

    # dict for packages that fail pkg.verify and their altered files
    altered_files = {}
    # Get the ignore_types list if any from the pkg_verify argument
    if isinstance(pkg_verify, list) \
            and any(x.get('ignore_types') is not None
                    for x in pkg_verify
                    if isinstance(x, _OrderedDict)
                    and 'ignore_types' in x):
        ignore_types = next(x.get('ignore_types')
                            for x in pkg_verify
                            if 'ignore_types' in x)
    else:
        ignore_types = []

    # Get the verify_options list if any from the pkg_verify argument
    if isinstance(pkg_verify, list) \
            and any(x.get('verify_options') is not None
                    for x in pkg_verify
                    if isinstance(x, _OrderedDict)
                    and 'verify_options' in x):
        verify_options = next(x.get('verify_options')
                              for x in pkg_verify
                              if 'verify_options' in x)
    else:
        verify_options = []

    if __grains__['os'] == 'FreeBSD':
        kwargs['with_origin'] = True

    if salt.utils.platform.is_windows():
        # Windows requires a refresh to establish a pkg db if refresh=True, so
        # add it to the kwargs.
        kwargs['refresh'] = refresh

    resolve_capabilities = kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__
    try:
        cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
        cur_prov = resolve_capabilities and __salt__['pkg.list_provides'](**kwargs) or dict()
    except CommandExecutionError as exc:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': exc.strerror}

    if salt.utils.platform.is_windows() and kwargs.pop('refresh', False):
        # We already refreshed when we called pkg.list_pkgs
        was_refreshed = True
        refresh = False

    if any((pkgs, sources)):
        if pkgs:
            desired = _repack_pkgs(pkgs, normalize=normalize)
        elif sources:
            desired = __salt__['pkg_resource.pack_sources'](
                sources,
                normalize=normalize,
            )

        if not desired:
            # Badly-formatted SLS
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'Invalidly formatted \'{0}\' parameter. See '
                               'minion log.'.format('pkgs' if pkgs
                                                    else 'sources')}

        to_unpurge = _find_unpurge_targets(desired, **kwargs)
    else:
        if salt.utils.platform.is_windows():
            pkginfo = _get_package_info(name, saltenv=kwargs['saltenv'])
            if not pkginfo:
                return {'name': name,
                        'changes': {},
                        'result': False,
                        'comment': 'Package {0} not found in the '
                                   'repository.'.format(name)}
            if version is None:
                version = _get_latest_pkg_version(pkginfo)

        if normalize:
            _normalize_name = \
                __salt__.get('pkg.normalize_name', lambda pkgname: pkgname)
            desired = {_normalize_name(name): version}
        else:
            desired = {name: version}

        to_unpurge = _find_unpurge_targets(desired, **kwargs)

        # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names
        origin = bool(re.search('/', name))

        if __grains__['os'] == 'FreeBSD' and origin:
            cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == name]
        else:
            cver = cur_pkgs.get(name, [])

        if name not in to_unpurge:
            if version and version in cver \
                    and not reinstall \
                    and not pkg_verify:
                # The package is installed and is the correct version
                return {'name': name,
                        'changes': {},
                        'result': True,
                        'comment': 'Version {0} of package \'{1}\' is already '
                                   'installed'.format(version, name)}

            # if cver is not an empty string, the package is already installed
            elif cver and version is None \
                    and not reinstall \
                    and not pkg_verify:
                # The package is installed
                return {'name': name,
                        'changes': {},
                        'result': True,
                        'comment': 'Package {0} is already '
                                   'installed'.format(name)}

    version_spec = False
    if not sources:
        # Check for alternate package names if strict processing is not
        # enforced. Takes extra time. Disable for improved performance
        if not skip_suggestions:
            # Perform platform-specific pre-flight checks
            not_installed = dict([
                (name, version)
                for name, version in desired.items()
                if not (name in cur_pkgs and (version is None or _fulfills_version_string(cur_pkgs[name], version)))
            ])
            if not_installed:
                try:
                    problems = _preflight_check(not_installed, **kwargs)
                except CommandExecutionError:
                    # Pre-flight check is best-effort; ignore failures
                    pass
                else:
                    comments = []
                    if problems.get('no_suggest'):
                        comments.append(
                            'The following package(s) were not found, and no '
                            'possible matches were found in the package db: '
                            '{0}'.format(
                                ', '.join(sorted(problems['no_suggest']))
                            )
                        )
                    if problems.get('suggest'):
                        for pkgname, suggestions in \
                                six.iteritems(problems['suggest']):
                            comments.append(
                                'Package \'{0}\' not found (possible matches: '
                                '{1})'.format(pkgname, ', '.join(suggestions))
                            )
                    if comments:
                        if len(comments) > 1:
                            comments.append('')
                        return {'name': name,
                                'changes': {},
                                'result': False,
                                'comment': '. '.join(comments).rstrip()}

    # Resolve the latest package version for any packages with "latest" in the
    # package version
    wants_latest = [] \
        if sources \
        else [x for x, y in six.iteritems(desired) if y == 'latest']
    if wants_latest:
        resolved_latest = __salt__['pkg.latest_version'](*wants_latest,
                                                         refresh=refresh,
                                                         **kwargs)
        if len(wants_latest) == 1:
            # With a single package, latest_version returns a bare string
            resolved_latest = {wants_latest[0]: resolved_latest}
        if refresh:
            was_refreshed = True
            refresh = False

        # pkg.latest_version returns an empty string when the package is
        # up-to-date. So check the currently-installed packages. If found, the
        # resolved latest version will be the currently installed one from
        # cur_pkgs. If not found, then the package doesn't exist and the
        # resolved latest version will be None.
        for key in resolved_latest:
            if not resolved_latest[key]:
                if key in cur_pkgs:
                    resolved_latest[key] = cur_pkgs[key][-1]
                else:
                    resolved_latest[key] = None

        # Update the desired versions with the ones we resolved
        desired.update(resolved_latest)

    # Find out which packages will be targeted in the call to pkg.install
    targets = {}
    to_reinstall = {}
    problems = []
    warnings = []
    failed_verify = False
    for package_name, version_string in six.iteritems(desired):
        cver = cur_pkgs.get(package_name, [])
        if resolve_capabilities \
                and not cver \
                and package_name in cur_prov:
            # Fall back to the package that provides this capability
            cver = cur_pkgs.get(cur_prov.get(package_name)[0], [])

        # Package not yet installed, so add to targets
        if not cver:
            targets[package_name] = version_string
            continue
        if sources:
            if reinstall:
                to_reinstall[package_name] = version_string
                continue
            elif 'lowpkg.bin_pkg_info' not in __salt__:
                continue
            # Metadata parser is available, cache the file and derive the
            # package's name and version
            err = 'Unable to cache {0}: {1}'
            try:
                cached_path = __salt__['cp.cache_file'](version_string, saltenv=kwargs['saltenv'])
            except CommandExecutionError as exc:
                problems.append(err.format(version_string, exc))
                continue
            if not cached_path:
                problems.append(err.format(version_string, 'file not found'))
                continue
            elif not os.path.exists(cached_path):
                problems.append('{0} does not exist on minion'.format(version_string))
                continue
            source_info = __salt__['lowpkg.bin_pkg_info'](cached_path)
            if source_info is None:
                warnings.append('Failed to parse metadata for {0}'.format(version_string))
                continue
            else:
                verstr = source_info['version']
        else:
            verstr = version_string
            if reinstall:
                to_reinstall[package_name] = version_string
                continue
            if not __salt__['pkg_resource.check_extra_requirements'](package_name, version_string):
                targets[package_name] = version_string
                continue
            # No version specified and pkg is installed
            elif __salt__['pkg_resource.version_clean'](version_string) is None:
                if (not reinstall) and pkg_verify:
                    try:
                        verify_result = __salt__['pkg.verify'](
                            package_name,
                            ignore_types=ignore_types,
                            verify_options=verify_options,
                            **kwargs
                        )
                    except (CommandExecutionError, SaltInvocationError) as exc:
                        failed_verify = exc.strerror
                        continue
                    if verify_result:
                        # Files were altered; schedule a reinstall
                        to_reinstall[package_name] = version_string
                        altered_files[package_name] = verify_result
                continue

        version_fulfilled = False
        allow_updates = bool(not sources and kwargs.get('allow_updates'))
        try:
            version_fulfilled = _fulfills_version_string(cver, verstr, ignore_epoch=ignore_epoch, allow_updates=allow_updates)
        except CommandExecutionError as exc:
            problems.append(exc.strerror)
            continue

        # Compare desired version against installed version.
        version_spec = True
        if not version_fulfilled:
            if reinstall:
                to_reinstall[package_name] = version_string
            else:
                version_conditions = _parse_version_string(version_string)
                if pkg_verify and any(oper == '==' for oper, version in version_conditions):
                    try:
                        verify_result = __salt__['pkg.verify'](
                            package_name,
                            ignore_types=ignore_types,
                            verify_options=verify_options,
                            **kwargs
                        )
                    except (CommandExecutionError, SaltInvocationError) as exc:
                        failed_verify = exc.strerror
                        continue
                    if verify_result:
                        to_reinstall[package_name] = version_string
                        altered_files[package_name] = verify_result
                else:
                    log.debug(
                        'Current version (%s) did not match desired version '
                        'specification (%s), adding to installation targets',
                        cver, version_string
                    )
                    targets[package_name] = version_string

    if failed_verify:
        problems.append(failed_verify)

    if problems:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': ' '.join(problems)}

    if not any((targets, to_unpurge, to_reinstall)):
        # All specified packages are installed
        msg = 'All specified packages are already installed{0}'
        msg = msg.format(
            ' and are at the desired version' if version_spec and not sources
            else ''
        )
        ret = {'name': name,
               'changes': {},
               'result': True,
               'comment': msg}
        if warnings:
            ret.setdefault('warnings', []).extend(warnings)
        return ret

    return (desired, targets, to_unpurge, to_reinstall, altered_files,
            warnings, was_refreshed)


def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None):
    '''
    Determine whether or not the installed packages match what was requested in
    the SLS file.
    '''
    ok = []
    failed = []
    if not new_caps:
        new_caps = dict()
    for pkgname, pkgver in desired.items():
        # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names.
        # Homebrew for Mac OSX does something similar with tap names
        # prefixing package names, separated with a slash.
        has_origin = '/' in pkgname

        if __grains__['os'] == 'FreeBSD' and has_origin:
            cver = [k for k, v in six.iteritems(new_pkgs) if v['origin'] == pkgname]
        elif __grains__['os'] == 'MacOS' and has_origin:
            cver = new_pkgs.get(pkgname, new_pkgs.get(pkgname.split('/')[-1]))
        elif __grains__['os'] == 'OpenBSD':
            cver = new_pkgs.get(pkgname.split('%')[0])
        elif __grains__['os_family'] == 'Debian':
            cver = new_pkgs.get(pkgname.split('=')[0])
        else:
            cver = new_pkgs.get(pkgname)

        if not cver and pkgname in new_caps:
            # Resolve capability/provides name to a real package
            cver = new_pkgs.get(new_caps.get(pkgname)[0])

        if not cver:
            failed.append(pkgname)
            continue
        elif pkgver == 'latest':
            ok.append(pkgname)
            continue
        elif not __salt__['pkg_resource.version_clean'](pkgver):
            # No version requested; mere presence is enough
            ok.append(pkgname)
            continue
        elif pkgver.endswith("*") and cver[0].startswith(pkgver[:-1]):
            # Wildcard version matched by simple prefix comparison
            ok.append(pkgname)
            continue
        if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch):
            ok.append(pkgname)
        else:
            failed.append(pkgname)
    return ok, failed


def _get_desired_pkg(name, desired):
    '''
    Helper function that retrieves and nicely formats the desired pkg (and
    version if specified) so that helpful information can be printed in the
    comment for the state.
    '''
    # Omit the '=' separator when no version is given or the version string
    # already carries its own comparison operator
    if not desired[name] or desired[name].startswith(('<', '>', '=')):
        oper = ''
    else:
        oper = '='
    return '{0}{1}{2}'.format(name, oper,
                              '' if not desired[name] else desired[name])


def _preflight_check(desired, fromrepo, **kwargs):
    '''
    Perform platform-specific checks on desired packages
    '''
    if 'pkg.check_db' not in __salt__:
        # Provider offers no package-db check; nothing to report
        return {}
    ret = {'suggest': {}, 'no_suggest': []}
    pkginfo = __salt__['pkg.check_db'](
        *list(desired.keys()), fromrepo=fromrepo, **kwargs
    )
    for pkgname in pkginfo:
        if pkginfo[pkgname]['found'] is False:
            if pkginfo[pkgname]['suggestions']:
                ret['suggest'][pkgname] = pkginfo[pkgname]['suggestions']
            else:
                ret['no_suggest'].append(pkgname)
    return ret


def _nested_output(obj):
    '''
    Serialize obj and format for output
    '''
    # The nested outputter reads configuration from its module-level __opts__
    nested.__opts__ = __opts__
    ret = nested.output(obj).rstrip()
    return ret


def _resolve_capabilities(pkgs, refresh=False, **kwargs):
    '''
    Resolve capabilities in ``pkgs`` and exchange them with real package
    names, when the result is distinct.
    This feature can be turned on while setting the paramter
    ``resolve_capabilities`` to True.

    Return the input dictionary with replaced capability names and as
    second return value a bool which say if a refresh need to be run.

    In case of ``resolve_capabilities`` is False (disabled) or not
    supported by the implementation the input is returned unchanged.
    '''
    if not pkgs or 'pkg.resolve_capabilities' not in __salt__:
        return pkgs, refresh

    # pkg.resolve_capabilities performs any needed refresh itself, hence the
    # second return value is always False here
    ret = __salt__['pkg.resolve_capabilities'](pkgs, refresh=refresh, **kwargs)
    return ret, False


def installed(
        name,
        version=None,
        refresh=None,
        fromrepo=None,
        skip_verify=False,
        skip_suggestions=False,
        pkgs=None,
        sources=None,
        allow_updates=False,
        pkg_verify=False,
        normalize=True,
        ignore_epoch=False,
        reinstall=False,
        update_holds=False,
        bypass_file=None,
        bypass_file_contains=None,
        **kwargs):
    '''
    Ensure that the package is installed, and that it is the correct version
    (if specified).

    :param str name:
        The name of the package to be installed.
This parameter is ignored if either "pkgs" or "sources" is used. Additionally, please note that this option can only be used to install packages from a software repository. To install a package file manually, use the "sources" option detailed below. :param str version: Install a specific version of a package. This option is ignored if "sources" is used. Currently, this option is supported for the following pkg providers: :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`pkgin <salt.modules.pkgin>`, :mod:`win_pkg <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`. The version number includes the release designation where applicable, to allow Salt to target a specific release of a given version. When in doubt, using the ``pkg.latest_version`` function for an uninstalled package will tell you the version available. .. code-block:: bash # salt myminion pkg.latest_version vim-enhanced myminion: 2:7.4.160-1.el7 .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the ``pkg.latest_version`` output above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. 
Also, while this function is not yet implemented for all pkg frontends, :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` will show all versions available in the various repositories for a given package, irrespective of whether or not it is installed. .. code-block:: bash # salt myminion pkg.list_repo_pkgs bash myminion: ---------- bash: - 4.2.46-21.el7_3 - 4.2.46-20.el7_2 This function was first added for :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` in 2014.1.0, and was expanded to :py:func:`Debian/Ubuntu <salt.modules.aptpkg.list_repo_pkgs>` and :py:func:`Arch Linux <salt.modules.pacman.list_repo_pkgs>`-based distros in the 2017.7.0 release. The version strings returned by either of these functions can be used as version specifiers in pkg states. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.installed: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 If the version given is the string ``latest``, the latest available package version will be installed à la ``pkg.latest``. **WILDCARD VERSIONS** As of the 2017.7.0 release, this state now supports wildcards in package versions for SUSE SLES/Leap/Tumbleweed, Debian/Ubuntu, RHEL/CentOS, Arch Linux, and their derivatives. Using wildcards can be useful for packages where the release name is built into the version in some way, such as for RHEL/CentOS which typically has version numbers like ``1.2.34-5.el7``. An example of the usage for this would be: .. code-block:: yaml mypkg: pkg.installed: - version: '1.2.34*' Keep in mind that using wildcard versions will result in a slower state run since Salt must gather the available versions of the specified packages and figure out which of them match the specified wildcard expression. :param bool refresh: This parameter controls whether or not the package repo database is updated prior to installing the requested package(s). 
If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before installing. If ``False``, the package database will *not* be refreshed before installing. If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - cache_valid_time: 300 - allow_updates: True - hold: False In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param str fromrepo: Specify a repository from which to install .. note:: Distros which use APT (Debian, Ubuntu, etc.) do not have a concept of repositories, in the same way as YUM-based distros do. When a source is added, it is assigned to a given release. Consider the following source configuration: .. code-block:: text deb http://ppa.launchpad.net/saltstack/salt/ubuntu precise main The packages provided by this source would be made available via the ``precise`` release, therefore ``fromrepo`` would need to be set to ``precise`` for Salt to install the package from this source. 
Having multiple sources in the same release may result in the default install candidate being newer than what is desired. If this is the case, the desired version must be specified using the ``version`` parameter. If the ``pkgs`` parameter is being used to install multiple packages in the same state, then instead of using ``version``, use the method of version specification described in the **Multiple Package Installation Options** section below. Running the shell command ``apt-cache policy pkgname`` on a minion can help elucidate the APT configuration and aid in properly configuring states: .. code-block:: bash root@saltmaster:~# salt ubuntu01 cmd.run 'apt-cache policy ffmpeg' ubuntu01: ffmpeg: Installed: (none) Candidate: 7:0.10.11-1~precise1 Version table: 7:0.10.11-1~precise1 0 500 http://ppa.launchpad.net/jon-severinsson/ffmpeg/ubuntu/ precise/main amd64 Packages 4:0.8.10-0ubuntu0.12.04.1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise-updates/main amd64 Packages 500 http://security.ubuntu.com/ubuntu/ precise-security/main amd64 Packages 4:0.8.1-0ubuntu1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise/main amd64 Packages The release is located directly after the source's URL. The actual release name is the part before the slash, so to install version **4:0.8.10-0ubuntu0.12.04.1** either ``precise-updates`` or ``precise-security`` could be used for the ``fromrepo`` value. :param bool skip_verify: Skip the GPG verification check for the package to be installed :param bool skip_suggestions: Force strict package naming. Disables lookup of package alternatives. .. versionadded:: 2014.1.1 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 :param bool allow_updates: Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). 
This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - allow_updates: True - hold: False :param bool pkg_verify: .. versionadded:: 2014.7.0 For requested packages that are already installed and would not be targeted for upgrade or downgrade, use pkg.verify to determine if any of the files installed by the package have been altered. If files have been altered, the reinstall option of pkg.install is used to force a reinstall. Types to ignore can be passed to pkg.verify. Additionally, ``verify_options`` can be used to modify further the behavior of pkg.verify. See examples below. Currently, this option is supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>`. Examples: .. code-block:: yaml httpd: pkg.installed: - version: 2.2.15-30.el6.centos - pkg_verify: True .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc - verify_options: - nodeps - nofiledigest :param list ignore_types: List of types to ignore when verifying the package .. versionadded:: 2014.7.0 :param list verify_options: List of additional options to pass when verifying the package. These options will be added to the ``rpm -V`` command, prepended with ``--`` (for example, when ``nodeps`` is passed in this option, ``rpm -V`` will be run with ``--nodeps``). .. versionadded:: 2016.11.0 :param bool normalize: Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml gpfs.gplbin-2.6.32-279.31.1.el6.x86_64: pkg.installed: - normalize: False :param bool ignore_epoch: When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.installed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the package would be installed, but the state would report as failed because the actual installed version would be ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 | **MULTIPLE PACKAGE INSTALLATION OPTIONS: (not supported in pkgng)** :param list pkgs: A list of packages to install from a software repository. All packages listed under ``pkgs`` will be installed via a single command. .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar - baz - hold: True ``NOTE:`` For :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`winrepo <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`, version numbers can be specified in the ``pkgs`` argument. For example: .. 
code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz Additionally, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`zypper <salt.modules.zypper>`, :mod:`yum/dnf <salt.modules.yumpkg>`, and :mod:`apt <salt.modules.aptpkg>` support the ``<``, ``<=``, ``>=``, and ``>`` operators for more control over what versions will be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: '>=1.2.3-4' - baz ``NOTE:`` When using comparison operators, the expression must be enclosed in quotes to avoid a YAML render error. With :mod:`ebuild <salt.modules.ebuild>` is also possible to specify a use flag list and/or if the given packages should be in package.accept_keywords file and/or the overlay from which you want the package to be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo: '~' - bar: '~>=1.2:slot::overlay[use,-otheruse]' - baz :param list sources: A list of packages to install, along with the source URI or local path from which to install each package. In the example below, ``foo``, ``bar``, ``baz``, etc. refer to the name of the package, as it would appear in the output of the ``pkg.version`` or ``pkg.list_pkgs`` salt CLI commands. .. code-block:: yaml mypkgs: pkg.installed: - sources: - foo: salt://rpms/foo.rpm - bar: http://somesite.org/bar.rpm - baz: ftp://someothersite.org/baz.rpm - qux: /minion/path/to/qux.rpm **PLATFORM-SPECIFIC ARGUMENTS** These are specific to each OS. If it does not apply to the execution module for your OS, it is ignored. :param bool hold: Force the package to be held at the current installed version. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2014.7.0 :param bool update_holds: If ``True``, and this function would update the package version, any packages which are being held will be temporarily unheld so that they can be updated. 
Otherwise, if this function attempts to update a held package, the held package(s) will be skipped and the state will fail. By default, this parameter is set to ``False``. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2016.11.0 :param list names: A list of packages to install from a software repository. Each package will be installed individually by the package manager. .. warning:: Unlike ``pkgs``, the ``names`` parameter cannot specify a version. In addition, it makes a separate call to the package management frontend to install each package, whereas ``pkgs`` makes just a single call. It is therefore recommended to use ``pkgs`` instead of ``names`` to install multiple packages, both for the additional features and the performance improvement that it brings. :param bool install_recommends: Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - install_recommends: False :param bool only_upgrade: Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. :param bool report_reboot_exit_codes: If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. 
code-block:: yaml ms vcpp installed: pkg.installed: - name: ms-vcpp - version: 10.0.40219 - report_reboot_exit_codes: False :param str bypass_file: If you wish to bypass the full package validation process, you can specify a file related to the installed package as a way to validate the package has already been installed. A good example would be a config file that is deployed with the package. Another bypass_file could be ``/run/salt-minion.pid``. .. code-block:: yaml install_ntp: pkg.installed: - name: ntp - bypass_file: /etc/ntp.conf The use case for this feature is when running salt at significant scale. Each state that has a requisite for a ``pkg.installed`` will have salt querying the package manager of the system. Compared to simple diff checks, querying the package manager is a lengthy process. This feature is an attempt to reduce the run time of states. If only a config change is being made but you wish to keep all of the self-resolving requisites this bypasses the lengthy cost of the package manager. The assumption is that if this file is present, the package should already be installed. :param str bypass_file_contains: This option can only be used in conjunction with the ``bypass_file`` option. It is to provide a second layer of validation before bypassing the ``pkg.installed`` process. .. code-block:: yaml install_ntp: pkg.installed: - name: ntp - bypass_file: /etc/ntp.conf - bypass_file_contains: version-20181218 This will have salt check to see if the file contains the specified string. If the value is found, the ``pkg.installed`` process will be bypassed under the assumption that two pieces of validation have passed and the package is already installed. .. warning:: Do not try to use ``{{ salt['pkg.version']('ntp') }}`` in a jinja template as part of your bypass_file_contains match. This will trigger a ``pkg.version`` lookup with the package manager and negate any time saved by trying to use the bypass feature. 
:return: A dictionary containing the state of the software installation :rtype dict: .. note:: The ``pkg.installed`` state supports the usage of ``reload_modules``. This functionality allows you to force Salt to reload all modules. In many cases, Salt is clever enough to transparently reload the modules. For example, if you install a package, Salt reloads modules because some other module or state might require the package which was installed. However, there are some edge cases where this may not be the case, which is what ``reload_modules`` is meant to resolve. You should only use ``reload_modules`` if your ``pkg.installed`` does some sort of installation where if you do not reload the modules future items in your state which rely on the software being installed will fail. Please see the :ref:`Reloading Modules <reloading-modules>` documentation for more information. ''' if not pkgs and isinstance(pkgs, list): return {'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided'} # If just a name (and optionally a version) is passed, just pack them into # the pkgs argument. 
if name and not any((pkgs, sources)): if version: pkgs = [{name: version}] version = None else: pkgs = [name] kwargs['saltenv'] = __env__ refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if bypass_file is not None and bypass_file_contains is not None: if os.path.isfile(bypass_file): with salt.utils.fopen(bypass_file) as bypass_file_open: if bypass_file_contains in bypass_file_open.read(): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as {} was present in {}'.format(bypass_file_contains, bypass_file)} if bypass_file is not None and bypass_file_contains is None: if os.path.isfile(bypass_file): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as bypass_file {} was present'.format(bypass_file)} # check if capabilities should be checked and modify the requested packages # accordingly. if pkgs: pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) if not isinstance(pkg_verify, list): pkg_verify = pkg_verify is True if (pkg_verify or isinstance(pkg_verify, list)) \ and 'pkg.verify' not in __salt__: return {'name': name, 'changes': {}, 'result': False, 'comment': 'pkg.verify not implemented'} if not isinstance(version, six.string_types) and version is not None: version = six.text_type(version) kwargs['allow_updates'] = allow_updates result = _find_install_targets(name, version, pkgs, sources, fromrepo=fromrepo, skip_suggestions=skip_suggestions, pkg_verify=pkg_verify, normalize=normalize, ignore_epoch=ignore_epoch, reinstall=reinstall, refresh=refresh, **kwargs) try: (desired, targets, to_unpurge, to_reinstall, altered_files, warnings, was_refreshed) = result if was_refreshed: refresh = False except ValueError: # _find_install_targets() found no targets or encountered an error # check that the hold function is available if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( 
name=name, pkgs=pkgs, sources=sources ) except (CommandExecutionError, SaltInvocationError) as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': six.text_type(exc)} if 'result' in hold_ret and not hold_ret['result']: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] for i in modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] result['changes'][i['name']] = i['changes'] for i in not_modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] for i in failed_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] return result if to_unpurge and 'lowpkg.unpurge' not in __salt__: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'lowpkg.unpurge not implemented'} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret # Remove any targets not returned by _find_install_targets if pkgs: pkgs = [dict([(x, y)]) for x, y in six.iteritems(targets)] pkgs.extend([dict([(x, y)]) for x, y in six.iteritems(to_reinstall)]) elif sources: oldsources = sources sources = [x for x in oldsources if next(iter(list(x.keys()))) in targets] sources.extend([x for x in oldsources if next(iter(list(x.keys()))) in to_reinstall]) comment = [] if __opts__['test']: if targets: if sources: summary = ', '.join(targets) else: summary = ', '.join([_get_desired_pkg(x, targets) for x in targets]) comment.append('The following packages would be ' 'installed/updated: {0}'.format(summary)) if to_unpurge: comment.append( 'The following packages would have their selection 
status ' 'changed from \'purge\' to \'install\': {0}' .format(', '.join(to_unpurge)) ) if to_reinstall: # Add a comment for each package in to_reinstall with its # pkg.verify output if reinstall: reinstall_targets = [] for reinstall_pkg in to_reinstall: if sources: reinstall_targets.append(reinstall_pkg) else: reinstall_targets.append( _get_desired_pkg(reinstall_pkg, to_reinstall) ) msg = 'The following packages would be reinstalled: ' msg += ', '.join(reinstall_targets) comment.append(msg) else: for reinstall_pkg in to_reinstall: if sources: pkgstr = reinstall_pkg else: pkgstr = _get_desired_pkg(reinstall_pkg, to_reinstall) comment.append( 'Package \'{0}\' would be reinstalled because the ' 'following files have been altered:'.format(pkgstr) ) comment.append( _nested_output(altered_files[reinstall_pkg]) ) ret = {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret changes = {'installed': {}} modified_hold = None not_modified_hold = None failed_hold = None if targets or to_reinstall: try: pkg_ret = __salt__['pkg.install'](name=None, refresh=refresh, version=version, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=pkgs, sources=sources, reinstall=bool(to_reinstall), normalize=normalize, update_holds=update_holds, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while installing ' 'package(s): {0}'.format(exc)) if warnings: ret.setdefault('warnings', []).extend(warnings) return ret if refresh: refresh = False if isinstance(pkg_ret, dict): changes['installed'].update(pkg_ret) elif isinstance(pkg_ret, six.string_types): comment.append(pkg_ret) # Code below will be looking for a dictionary. If this is a string # it means that there was an exception raised and that no packages # changed, so now that we have added this error to the comments we # set this to an empty dictionary so that the code below which # checks reinstall targets works. pkg_ret = {} if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( name=name, pkgs=desired ) except (CommandExecutionError, SaltInvocationError) as exc: comment.append(six.text_type(exc)) ret = {'name': name, 'changes': changes, 'result': False, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: if 'result' in hold_ret and not hold_ret['result']: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] if to_unpurge: changes['purge_desired'] = __salt__['lowpkg.unpurge'](*to_unpurge) # Analyze pkg.install results for packages in targets if sources: modified = [x for x in changes['installed'] if x in targets] not_modified = [x for x in desired if x not in targets 
and x not in to_reinstall] failed = [x for x in targets if x not in modified] else: if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True new_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__: new_caps = __salt__['pkg.list_provides'](**kwargs) else: new_caps = {} ok, failed = _verify_install(desired, new_pkgs, ignore_epoch=ignore_epoch, new_caps=new_caps) modified = [x for x in ok if x in targets] not_modified = [x for x in ok if x not in targets and x not in to_reinstall] failed = [x for x in failed if x in targets] # If there was nothing unpurged, just set the changes dict to the contents # of changes['installed']. if not changes.get('purge_desired'): changes = changes['installed'] if modified: if sources: summary = ', '.join(modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in modified]) if len(summary) < 20: comment.append('The following packages were installed/updated: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} installed/updated.'.format( len(modified), 's' if len(modified) > 1 else '', 'were' if len(modified) > 1 else 'was' ) ) if modified_hold: for i in modified_hold: change_name = i['name'] if change_name in changes: comment.append(i['comment']) if changes[change_name]['new']: changes[change_name]['new'] += '\n' changes[change_name]['new'] += '{0}'.format(i['changes']['new']) if changes[change_name]['old']: changes[change_name]['old'] += '\n' changes[change_name]['old'] += '{0}'.format(i['changes']['old']) else: comment.append(i['comment']) changes[change_name] = {} changes[change_name]['new'] = '{0}'.format(i['changes']['new']) # Any requested packages that were not targeted for install or reinstall if not_modified: if sources: summary = ', '.join(not_modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in not_modified]) if len(not_modified) <= 20: comment.append('The following 
packages were already installed: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} already installed'.format( len(not_modified), 's' if len(not_modified) > 1 else '', 'were' if len(not_modified) > 1 else 'was' ) ) if not_modified_hold: for i in not_modified_hold: comment.append(i['comment']) result = True if failed: if sources: summary = ', '.join(failed) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in failed]) comment.insert(0, 'The following packages failed to ' 'install/update: {0}'.format(summary)) result = False if failed_hold: for i in failed_hold: comment.append(i['comment']) result = False # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] # Rerun pkg.verify for packages in to_reinstall to determine failed modified = [] failed = [] for reinstall_pkg in to_reinstall: if reinstall: if reinstall_pkg in pkg_ret: modified.append(reinstall_pkg) else: failed.append(reinstall_pkg) elif pkg_verify: # No need to wrap this in a try/except because we would already # have caught invalid arguments earlier. 
def downloaded(name,
               version=None,
               pkgs=None,
               fromrepo=None,
               ignore_epoch=None,
               **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that the package is downloaded, and that it is the correct version
    (if specified).

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    :param str name:
        The name of the package to be downloaded. This parameter is ignored
        if "pkgs" is used. Additionally, please note that this option can
        only be used to download packages from a software repository.

    :param str version:
        Download a specific version of a package.

        .. important::
            As of version 2015.8.7, for distros which use yum/dnf, packages
            which have a version with a nonzero epoch (that is, versions which
            start with a number followed by a colon must have the epoch included
            when specifying the version number. For example:

            .. code-block:: yaml

                vim-enhanced:
                  pkg.downloaded:
                    - version: 2:7.4.160-1.el7

            An **ignore_epoch** argument has been added to which causes the
            epoch to be disregarded when the state checks to see if the desired
            version was installed.

            You can install a specific version when using the ``pkgs`` argument
            by including the version after the package:

            .. code-block:: yaml

                common_packages:
                  pkg.downloaded:
                    - pkgs:
                      - unzip
                      - dos2unix
                      - salt-minion: 2015.8.5-1.el6

    :param bool resolve_capabilities:
        Turn on resolving capabilities. This allow one to name "provides" or
        alias names for packages.

        .. versionadded:: 2018.3.0

    :return: A state return dictionary with ``name``, ``changes``, ``result``
        and ``comment`` keys.

    CLI Example:

    .. code-block:: yaml

        zsh:
          pkg.downloaded:
            - version: 5.0.5-4.63
            - fromrepo: "myrepository"
    '''
    # Standard state return skeleton. ``result`` stays None until we can
    # decide success/failure (None is also the "would change" value returned
    # in test mode).
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # Bail out early on platforms whose pkg module cannot report downloaded
    # packages -- without it the post-download verification below is impossible.
    if 'pkg.list_downloaded' not in __salt__:
        ret['result'] = False
        ret['comment'] = 'The pkg.downloaded state is not available on ' \
                         'this platform'
        return ret

    # An explicitly empty list (as opposed to pkgs=None) means "nothing to
    # do" and succeeds trivially.
    if not pkgs and isinstance(pkgs, list):
        ret['result'] = True
        ret['comment'] = 'No packages to download provided'
        return ret

    # If just a name (and optionally a version) is passed, just pack them into
    # the pkgs argument.
    if name and not pkgs:
        if version:
            pkgs = [{name: version}]
            version = None
        else:
            pkgs = [name]

    # It doesn't make sense here to receive 'downloadonly' as kwargs
    # as we're explicitly passing 'downloadonly=True' to execution module.
    if 'downloadonly' in kwargs:
        del kwargs['downloadonly']

    # Translate capability/alias names into real package names where the
    # platform supports it. The refresh flag is not needed here.
    pkgs, _refresh = _resolve_capabilities(pkgs, **kwargs)

    # Only downloading not yet downloaded packages
    targets = _find_download_targets(name,
                                     version,
                                     pkgs,
                                     fromrepo=fromrepo,
                                     ignore_epoch=ignore_epoch,
                                     **kwargs)
    # _find_download_targets returns a full state dict (with 'result') when it
    # has already decided the outcome; a plain dict of targets otherwise.
    if isinstance(targets, dict) and 'result' in targets:
        return targets
    elif not isinstance(targets, dict):
        ret['result'] = False
        ret['comment'] = 'An error was encountered while checking targets: ' \
                         '{0}'.format(targets)
        return ret

    # In test mode report what would be downloaded and leave result as None.
    if __opts__['test']:
        summary = ', '.join(targets)
        ret['comment'] = 'The following packages would be ' \
                         'downloaded: {0}'.format(summary)
        return ret

    try:
        # downloadonly=True makes pkg.install fetch the packages without
        # installing them.
        pkg_ret = __salt__['pkg.install'](name=name,
                                          pkgs=pkgs,
                                          version=version,
                                          downloadonly=True,
                                          fromrepo=fromrepo,
                                          ignore_epoch=ignore_epoch,
                                          **kwargs)
        ret['result'] = True
        ret['changes'].update(pkg_ret)
    except CommandExecutionError as exc:
        ret = {'name': name, 'result': False}
        if exc.info:
            # Get information for state return from the exception.
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = 'An error was encountered while downloading ' \
                             'package(s): {0}'.format(exc)
        return ret

    # Verify the requested versions actually landed in the download cache.
    new_pkgs = __salt__['pkg.list_downloaded'](**kwargs)
    ok, failed = _verify_install(targets, new_pkgs, ignore_epoch=ignore_epoch)

    if failed:
        summary = ', '.join([_get_desired_pkg(x, targets)
                             for x in failed])
        ret['result'] = False
        ret['comment'] = 'The following packages failed to ' \
                         'download: {0}'.format(summary)

    # No changes and no comment means everything was already in the cache.
    if not ret['changes'] and not ret['comment']:
        ret['result'] = True
        ret['comment'] = 'Packages are already downloaded: ' \
                         '{0}'.format(', '.join(targets))

    return ret
def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that packages related to certain advisory ids are installed.

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    :param str name:
        The name of the state; used only in the state return dictionary.

    :param list advisory_ids:
        A list of advisory ids whose related packages should be installed.

    :param bool downloadonly:
        If true, only download (do not install) the patch packages.

    :return: A state return dictionary with ``name``, ``changes``, ``result``
        and ``comment`` keys.

    CLI Example:

    .. code-block:: yaml

        issue-foo-fixed:
          pkg.patch_installed:
            - advisory_ids:
              - SUSE-SLE-SERVER-12-SP2-2017-185
              - SUSE-SLE-SERVER-12-SP2-2017-150
              - SUSE-SLE-SERVER-12-SP2-2017-120
    '''
    # Standard state return skeleton. ``result`` stays None until the outcome
    # is known (None is also the "would change" value returned in test mode).
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # pkg.list_patches availability is the marker for advisory/patch support
    # on this platform.
    if 'pkg.list_patches' not in __salt__:
        ret['result'] = False
        ret['comment'] = 'The pkg.patch_installed state is not available on ' \
                         'this platform'
        return ret

    # An explicitly empty list (as opposed to None) means "nothing to do".
    if not advisory_ids and isinstance(advisory_ids, list):
        ret['result'] = True
        ret['comment'] = 'No advisory ids provided'
        return ret

    # Determine which advisory ids are not yet applied on this system.
    targets = _find_advisory_targets(name, advisory_ids, **kwargs)
    # _find_advisory_targets returns a full state dict (with 'result') when it
    # has already decided the outcome; a list of advisory ids otherwise.
    if isinstance(targets, dict) and 'result' in targets:
        return targets
    elif not isinstance(targets, list):
        ret['result'] = False
        ret['comment'] = 'An error was encountered while checking targets: ' \
                         '{0}'.format(targets)
        return ret

    # In test mode report what would be done and leave result as None.
    if __opts__['test']:
        summary = ', '.join(targets)
        ret['comment'] = 'The following advisory patches would be ' \
                         'downloaded: {0}'.format(summary)
        return ret

    try:
        # Delegate to pkg.install, which understands advisory_ids on the
        # supported providers.
        pkg_ret = __salt__['pkg.install'](name=name,
                                          advisory_ids=advisory_ids,
                                          downloadonly=downloadonly,
                                          **kwargs)
        ret['result'] = True
        ret['changes'].update(pkg_ret)
    except CommandExecutionError as exc:
        ret = {'name': name, 'result': False}
        if exc.info:
            # Get information for state return from the exception.
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = ('An error was encountered while downloading '
                              'package(s): {0}'.format(exc))
        return ret

    # No changes and no comment means the advisories were already applied
    # (or already downloaded, in downloadonly mode).
    if not ret['changes'] and not ret['comment']:
        status = 'downloaded' if downloadonly else 'installed'
        ret['result'] = True
        ret['comment'] = ('Advisory patch is not needed or related packages '
                          'are already {0}'.format(status))

    return ret
def patch_downloaded(name, advisory_ids=None, **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that packages related to certain advisory ids are downloaded.

    This is a thin wrapper around :py:func:`patch_installed
    <salt.states.pkg.patch_installed>` with ``downloadonly`` forced to
    ``True``.

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    CLI Example:

    .. code-block:: yaml

        preparing-to-fix-issues:
          pkg.patch_downloaded:
            - advisory_ids:
              - SUSE-SLE-SERVER-12-SP2-2017-185
              - SUSE-SLE-SERVER-12-SP2-2017-150
              - SUSE-SLE-SERVER-12-SP2-2017-120
    '''
    # Advisory/patch support is signalled by the presence of pkg.list_patches.
    if 'pkg.list_patches' not in __salt__:
        unsupported = {
            'name': name,
            'result': False,
            'changes': {},
            'comment': 'The pkg.patch_downloaded state is not available on '
                       'this platform',
        }
        return unsupported

    # Discard any caller-supplied 'downloadonly'; this state always forces
    # downloadonly=True when delegating to the execution module.
    kwargs.pop('downloadonly', None)

    return patch_installed(name=name,
                           advisory_ids=advisory_ids,
                           downloadonly=True,
                           **kwargs)
fromrepo Specify a repository from which to install skip_verify Skip the GPG verification check for the package to be installed refresh This parameter controls whether or not the package repo database is updated prior to checking for the latest available version of the requested packages. If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before checking for the latest available version of the requested packages. If ``False``, the package database will *not* be refreshed before checking. If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.latest: - refresh: True - cache_valid_time: 300 In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 Multiple Package Installation Options: (Not yet supported for: FreeBSD, OpenBSD, MacOS, and Solaris pkgutil) pkgs A list of packages to maintain at the latest available version. .. 
code-block:: yaml mypkgs: pkg.latest: - pkgs: - foo - bar - baz install_recommends Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - install_recommends: False only_upgrade Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. report_reboot_exit_codes If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. code-block:: yaml ms vcpp installed: pkg.latest: - name: ms-vcpp - report_reboot_exit_codes: False ''' refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if kwargs.get('sources'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'The "sources" parameter is not supported.'} elif pkgs: desired_pkgs = list(_repack_pkgs(pkgs).keys()) if not desired_pkgs: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted "pkgs" parameter. See ' 'minion log.'} else: if not pkgs and isinstance(pkgs, list): return { 'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided' } else: desired_pkgs = [name] kwargs['saltenv'] = __env__ # check if capabilities should be checked and modify the requested packages # accordingly. 
desired_pkgs, refresh = _resolve_capabilities(desired_pkgs, refresh=refresh, **kwargs) try: avail = __salt__['pkg.latest_version'](*desired_pkgs, fromrepo=fromrepo, refresh=refresh, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking the ' 'newest available version of package(s): {0}' .format(exc)} try: cur = __salt__['pkg.version'](*desired_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': exc.strerror} # Repack the cur/avail data if only a single package is being checked if isinstance(cur, six.string_types): cur = {desired_pkgs[0]: cur} if isinstance(avail, six.string_types): avail = {desired_pkgs[0]: avail} targets = {} problems = [] for pkg in desired_pkgs: if not avail.get(pkg): # Package either a) is up-to-date, or b) does not exist if not cur.get(pkg): # Package does not exist msg = 'No information found for \'{0}\'.'.format(pkg) log.error(msg) problems.append(msg) elif watch_flags \ and __grains__.get('os_family') == 'Gentoo' \ and __salt__['portage_config.is_changed_uses'](pkg): # Package is up-to-date, but Gentoo USE flags are changing so # we need to add it to the targets targets[pkg] = cur[pkg] else: # Package either a) is not installed, or b) is installed and has an # upgrade available targets[pkg] = avail[pkg] if problems: return { 'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems) } if targets: # Find up-to-date packages if not pkgs: # There couldn't have been any up-to-date packages if this state # only targeted a single package and is being allowed to proceed to # the install step. 
up_to_date = [] else: up_to_date = [x for x in pkgs if x not in targets] if __opts__['test']: comments = [] comments.append( 'The following packages would be installed/upgraded: ' + ', '.join(sorted(targets)) ) if up_to_date: up_to_date_count = len(up_to_date) if up_to_date_count <= 10: comments.append( 'The following packages are already up-to-date: ' + ', '.join( ['{0} ({1})'.format(x, cur[x]) for x in sorted(up_to_date)] ) ) else: comments.append( '{0} packages are already up-to-date' .format(up_to_date_count) ) return {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comments)} if salt.utils.platform.is_windows(): # pkg.install execution module on windows ensures the software # package is installed when no version is specified, it does not # upgrade the software to the latest. This is per the design. # Build updated list of pkgs *with verion number*, exclude # non-targeted ones targeted_pkgs = [{x: targets[x]} for x in targets] else: # Build updated list of pkgs to exclude non-targeted ones targeted_pkgs = list(targets) # No need to refresh, if a refresh was necessary it would have been # performed above when pkg.latest_version was run. 
try: changes = __salt__['pkg.install'](name=None, refresh=False, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=targeted_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while installing ' 'package(s): {0}'.format(exc)} if changes: # Find failed and successful updates failed = [x for x in targets if not changes.get(x) or changes[x].get('new') != targets[x] and targets[x] != 'latest'] successful = [x for x in targets if x not in failed] comments = [] if failed: msg = 'The following packages failed to update: ' \ '{0}'.format(', '.join(sorted(failed))) comments.append(msg) if successful: msg = 'The following packages were successfully ' \ 'installed/upgraded: ' \ '{0}'.format(', '.join(sorted(successful))) comments.append(msg) if up_to_date: if len(up_to_date) <= 10: msg = 'The following packages were already up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: msg = '{0} packages were already up-to-date '.format( len(up_to_date)) comments.append(msg) return {'name': name, 'changes': changes, 'result': False if failed else True, 'comment': ' '.join(comments)} else: if len(targets) > 10: comment = ('{0} targeted packages failed to update. ' 'See debug log for details.'.format(len(targets))) elif len(targets) > 1: comment = ('The following targeted packages failed to update. ' 'See debug log for details: ({0}).' 
.format(', '.join(sorted(targets)))) else: comment = 'Package {0} failed to ' \ 'update.'.format(next(iter(list(targets.keys())))) if up_to_date: if len(up_to_date) <= 10: comment += ' The following packages were already ' \ 'up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: comment += '{0} packages were already ' \ 'up-to-date'.format(len(up_to_date)) return {'name': name, 'changes': changes, 'result': False, 'comment': comment} else: if len(desired_pkgs) > 10: comment = 'All {0} packages are up-to-date.'.format( len(desired_pkgs)) elif len(desired_pkgs) > 1: comment = 'All packages are up-to-date ' \ '({0}).'.format(', '.join(sorted(desired_pkgs))) else: comment = 'Package {0} is already ' \ 'up-to-date'.format(desired_pkgs[0]) return {'name': name, 'changes': {}, 'result': True, 'comment': comment} def removed(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.remove`` if necessary to remove the package. name The name of the package to be removed. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. 
If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.removed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to remove. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='remove', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while removing ' 'package(s): {0}'.format(exc)) return ret def purged(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.purge`` if necessary to purge the package. All configuration files are also removed. name The name of the package to be purged. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. 
versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.purged: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to purge. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='purge', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while purging ' 'package(s): {0}'.format(exc)) return ret def uptodate(name, refresh=False, pkgs=None, **kwargs): ''' .. versionadded:: 2014.7.0 .. versionchanged:: 2018.3.0 Added support for the ``pkgin`` provider. Verify that the system is completely up to date. 
name The name has no functional value and is only used as a tracking reference refresh refresh the package database before checking for new upgrades pkgs list of packages to upgrade :param str cache_valid_time: This parameter sets the value in seconds after which cache marked as invalid, and cache update is necessary. This overwrite ``refresh`` parameter default behavior. In this case cache_valid_time is set, refresh will not take place for amount in seconds since last ``apt-get update`` executed on the system. .. note:: This parameter available only on Debian based distributions, and have no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 kwargs Any keyword arguments to pass through to ``pkg.upgrade``. .. versionadded:: 2015.5.0 ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'Failed to update'} if 'pkg.list_upgrades' not in __salt__: ret['comment'] = 'State pkg.uptodate is not available' return ret # emerge --update doesn't appear to support repo notation if 'fromrepo' in kwargs and __grains__['os_family'] == 'Gentoo': ret['comment'] = '\'fromrepo\' argument not supported on this platform' return ret if isinstance(refresh, bool): pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) try: packages = __salt__['pkg.list_upgrades'](refresh=refresh, **kwargs) expected = {pkgname: {'new': pkgver, 'old': __salt__['pkg.version'](pkgname, **kwargs)} for pkgname, pkgver in six.iteritems(packages)} if isinstance(pkgs, list): packages = [pkg for pkg in packages if pkg in pkgs] expected = {pkgname: pkgver for pkgname, pkgver in six.iteritems(expected) if pkgname in pkgs} except Exception as exc: ret['comment'] = six.text_type(exc) return ret else: ret['comment'] = 'refresh must be either True or False' return ret if not packages: ret['comment'] = 'System is already up-to-date' ret['result'] = True return ret elif 
__opts__['test']: ret['comment'] = 'System update will be performed' ret['changes'] = expected ret['result'] = None return ret try: ret['changes'] = __salt__['pkg.upgrade'](refresh=refresh, pkgs=pkgs, **kwargs) except CommandExecutionError as exc: if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while updating ' 'packages: {0}'.format(exc)) return ret # If a package list was provided, ensure those packages were updated missing = [] if isinstance(pkgs, list): missing = [pkg for pkg in six.iterkeys(expected) if pkg not in ret['changes']] if missing: ret['comment'] = 'The following package(s) failed to update: {0}'.format(', '.join(missing)) ret['result'] = False else: ret['comment'] = 'Upgrade ran successfully' ret['result'] = True return ret def group_installed(name, skip=None, include=None, **kwargs): ''' .. versionadded:: 2015.8.0 .. versionchanged:: 2016.11.0 Added support in :mod:`pacman <salt.modules.pacman>` Ensure that an entire package group is installed. This state is currently only supported for the :mod:`yum <salt.modules.yumpkg>` and :mod:`pacman <salt.modules.pacman>` package managers. skip Packages that would normally be installed by the package group ("default" packages), which should not be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - skip: - piranha include Packages which are included in a group, which would not normally be installed by a ``yum groupinstall`` ("optional" packages). Note that this will not enforce group membership; if you include packages which are not members of the specified groups, they will still be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - include: - haproxy .. 
versionchanged:: 2016.3.0 This option can no longer be passed as a comma-separated list, it must now be passed as a list (as shown in the above example). .. note:: Because this is essentially a wrapper around :py:func:`pkg.install <salt.modules.yumpkg.install>`, any argument which can be passed to pkg.install may also be included here, and it will be passed on to the call to :py:func:`pkg.install <salt.modules.yumpkg.install>`. ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} if 'pkg.group_diff' not in __salt__: ret['comment'] = 'pkg.group_install not available for this platform' return ret if skip is None: skip = [] else: if not isinstance(skip, list): ret['comment'] = 'skip must be formatted as a list' return ret for idx, item in enumerate(skip): if not isinstance(item, six.string_types): skip[idx] = six.text_type(item) if include is None: include = [] else: if not isinstance(include, list): ret['comment'] = 'include must be formatted as a list' return ret for idx, item in enumerate(include): if not isinstance(item, six.string_types): include[idx] = six.text_type(item) try: diff = __salt__['pkg.group_diff'](name) except CommandExecutionError as err: ret['comment'] = ('An error was encountered while installing/updating ' 'group \'{0}\': {1}.'.format(name, err)) return ret mandatory = diff['mandatory']['installed'] + \ diff['mandatory']['not installed'] invalid_skip = [x for x in mandatory if x in skip] if invalid_skip: ret['comment'] = ( 'The following mandatory packages cannot be skipped: {0}' .format(', '.join(invalid_skip)) ) return ret targets = diff['mandatory']['not installed'] targets.extend([x for x in diff['default']['not installed'] if x not in skip]) targets.extend(include) if not targets: ret['result'] = True ret['comment'] = 'Group \'{0}\' is already installed'.format(name) return ret partially_installed = diff['mandatory']['installed'] \ or diff['default']['installed'] \ or diff['optional']['installed'] if __opts__['test']: 
ret['result'] = None if partially_installed: ret['comment'] = ( 'Group \'{0}\' is partially installed and will be updated' .format(name) ) else: ret['comment'] = 'Group \'{0}\' will be installed'.format(name) return ret try: ret['changes'] = __salt__['pkg.install'](pkgs=targets, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while ' 'installing/updating group \'{0}\': {1}' .format(name, exc)) return ret failed = [x for x in targets if x not in __salt__['pkg.list_pkgs'](**kwargs)] if failed: ret['comment'] = ( 'Failed to install the following packages: {0}' .format(', '.join(failed)) ) return ret ret['result'] = True ret['comment'] = 'Group \'{0}\' was {1}'.format( name, 'updated' if partially_installed else 'installed' ) return ret def mod_init(low): ''' Set a flag to tell the install functions to refresh the package database. This ensures that the package database is refreshed only once during a state run significantly improving the speed of package management during a state run. It sets a flag for a number of reasons, primarily due to timeline logic. When originally setting up the mod_init for pkg a number of corner cases arose with different package managers and how they refresh package data. It also runs the "ex_mod_init" from the package manager module that is currently loaded. The "ex_mod_init" is expected to work as a normal "mod_init" function. .. 
seealso:: :py:func:`salt.modules.ebuild.ex_mod_init` ''' ret = True if 'pkg.ex_mod_init' in __salt__: ret = __salt__['pkg.ex_mod_init'](low) if low['fun'] == 'installed' or low['fun'] == 'latest': salt.utils.pkg.write_rtag(__opts__) return ret return False def mod_aggregate(low, chunks, running): ''' The mod_aggregate function which looks up all packages in the available low chunks and merges them into a single pkgs ref in the present low data ''' pkgs = [] pkg_type = None agg_enabled = [ 'installed', 'latest', 'removed', 'purged', ] if low.get('fun') not in agg_enabled: return low for chunk in chunks: tag = __utils__['state.gen_tag'](chunk) if tag in running: # Already ran the pkg state, skip aggregation continue if chunk.get('state') == 'pkg': if '__agg__' in chunk: continue # Check for the same function if chunk.get('fun') != low.get('fun'): continue # Check for the same repo if chunk.get('fromrepo') != low.get('fromrepo'): continue # Check first if 'sources' was passed so we don't aggregate pkgs # and sources together. if 'sources' in chunk: if pkg_type is None: pkg_type = 'sources' if pkg_type == 'sources': pkgs.extend(chunk['sources']) chunk['__agg__'] = True else: if pkg_type is None: pkg_type = 'pkgs' if pkg_type == 'pkgs': # Pull out the pkg names! if 'pkgs' in chunk: pkgs.extend(chunk['pkgs']) chunk['__agg__'] = True elif 'name' in chunk: version = chunk.pop('version', None) if version is not None: pkgs.append({chunk['name']: version}) else: pkgs.append(chunk['name']) chunk['__agg__'] = True if pkg_type is not None and pkgs: if pkg_type in low: low[pkg_type].extend(pkgs) else: low[pkg_type] = pkgs return low def mod_watch(name, **kwargs): ''' Install/reinstall a package based on a watch requisite .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered. 
''' sfun = kwargs.pop('sfun', None) mapfun = {'purged': purged, 'latest': latest, 'removed': removed, 'installed': installed} if sfun in mapfun: return mapfun[sfun](name, **kwargs) return {'name': name, 'changes': {}, 'comment': 'pkg.{0} does not work with the watch requisite'.format(sfun), 'result': False}
saltstack/salt
salt/states/pkg.py
purged
python
def purged(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.purge`` if necessary to purge the package. All configuration files are also removed. name The name of the package to be purged. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. 
This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.purged: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to purge. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='purge', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while purging ' 'package(s): {0}'.format(exc)) return ret
Verify that a package is not installed, calling ``pkg.purge`` if necessary to purge the package. All configuration files are also removed. name The name of the package to be purged. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. 
code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.purged: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to purge. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pkg.py#L2876-L2979
[ "def _uninstall(\n action='remove',\n name=None,\n version=None,\n pkgs=None,\n normalize=True,\n ignore_epoch=False,\n **kwargs):\n '''\n Common function for package removal\n '''\n if action not in ('remove', 'purge'):\n return {'name': name,\n 'changes': {},\n 'result': False,\n 'comment': 'Invalid action \\'{0}\\'. '\n 'This is probably a bug.'.format(action)}\n\n try:\n pkg_params = __salt__['pkg_resource.parse_targets'](\n name,\n pkgs,\n normalize=normalize)[0]\n except MinionError as exc:\n return {'name': name,\n 'changes': {},\n 'result': False,\n 'comment': 'An error was encountered while parsing targets: '\n '{0}'.format(exc)}\n targets = _find_remove_targets(name, version, pkgs, normalize,\n ignore_epoch=ignore_epoch, **kwargs)\n if isinstance(targets, dict) and 'result' in targets:\n return targets\n elif not isinstance(targets, list):\n return {'name': name,\n 'changes': {},\n 'result': False,\n 'comment': 'An error was encountered while checking targets: '\n '{0}'.format(targets)}\n if action == 'purge':\n old_removed = __salt__['pkg.list_pkgs'](versions_as_list=True,\n removed=True,\n **kwargs)\n targets.extend([x for x in pkg_params if x in old_removed])\n targets.sort()\n\n if not targets:\n return {'name': name,\n 'changes': {},\n 'result': True,\n 'comment': 'None of the targeted packages are installed'\n '{0}'.format(' or partially installed'\n if action == 'purge' else '')}\n\n if __opts__['test']:\n return {'name': name,\n 'changes': {},\n 'result': None,\n 'comment': 'The following packages will be {0}d: '\n '{1}.'.format(action, ', '.join(targets))}\n\n changes = __salt__['pkg.{0}'.format(action)](name, pkgs=pkgs, version=version, **kwargs)\n new = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)\n failed = []\n for x in pkg_params:\n if __grains__['os_family'] in ['Suse', 'RedHat']:\n # Check if the package version set to be removed is actually removed:\n if x in new and not pkg_params[x]:\n failed.append(x)\n elif x in new 
and pkg_params[x] in new[x]:\n failed.append(x + \"-\" + pkg_params[x])\n elif x in new:\n failed.append(x)\n\n if action == 'purge':\n new_removed = __salt__['pkg.list_pkgs'](versions_as_list=True,\n removed=True,\n **kwargs)\n failed.extend([x for x in pkg_params if x in new_removed])\n failed.sort()\n\n if failed:\n return {'name': name,\n 'changes': changes,\n 'result': False,\n 'comment': 'The following packages failed to {0}: '\n '{1}.'.format(action, ', '.join(failed))}\n\n comments = []\n not_installed = sorted([x for x in pkg_params if x not in targets])\n if not_installed:\n comments.append('The following packages were not installed: '\n '{0}'.format(', '.join(not_installed)))\n comments.append('The following packages were {0}d: '\n '{1}.'.format(action, ', '.join(targets)))\n else:\n comments.append('All targeted packages were {0}d.'.format(action))\n\n return {'name': name,\n 'changes': changes,\n 'result': True,\n 'comment': ' '.join(comments)}\n" ]
# -*- coding: utf-8 -*- ''' Installation of packages using OS package managers such as yum or apt-get ========================================================================= .. note:: On minions running systemd>=205, as of version 2015.8.12, 2016.3.3, and 2016.11.0, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing the package manager commands spawned by Salt, when Salt updates itself (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.use_scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Salt can manage software packages via the pkg state module, packages can be set up to be installed, latest, removed and purged. Package management declarations are typically rather simple: .. code-block:: yaml vim: pkg.installed A more involved example involves pulling from a custom repository. .. code-block:: yaml base: pkgrepo.managed: - humanname: Logstash PPA - name: ppa:wolfnet/logstash - dist: precise - file: /etc/apt/sources.list.d/logstash.list - keyid: 28B04E4A - keyserver: keyserver.ubuntu.com logstash: pkg.installed: - fromrepo: ppa:wolfnet/logstash Multiple packages can also be installed with the use of the pkgs state module .. code-block:: yaml dotdeb.repo: pkgrepo.managed: - humanname: Dotdeb - name: deb http://packages.dotdeb.org wheezy-php55 all - dist: wheezy-php55 - file: /etc/apt/sources.list.d/dotbeb.list - keyid: 89DF5277 - keyserver: keys.gnupg.net - refresh_db: true php.packages: pkg.installed: - fromrepo: wheezy-php55 - pkgs: - php5-fpm - php5-cli - php5-curl .. 
warning:: Package names are currently case-sensitive. If the minion is using a package manager which is not case-sensitive (such as :mod:`pkgng <salt.modules.pkgng>`), then this state will fail if the proper case is not used. This will be addressed in a future release of Salt. ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import fnmatch import logging import os import re # Import Salt libs import salt.utils.pkg import salt.utils.platform import salt.utils.versions from salt.output import nested from salt.utils.functools import namespaced_function as _namespaced_function from salt.utils.odict import OrderedDict as _OrderedDict from salt.exceptions import ( CommandExecutionError, MinionError, SaltInvocationError ) from salt.modules.pkg_resource import _repack_pkgs # Import 3rd-party libs from salt.ext import six # pylint: disable=invalid-name _repack_pkgs = _namespaced_function(_repack_pkgs, globals()) if salt.utils.platform.is_windows(): # pylint: disable=import-error,no-name-in-module,unused-import from salt.ext.six.moves.urllib.parse import urlparse as _urlparse from salt.exceptions import SaltRenderError import collections import datetime import errno import time from functools import cmp_to_key # pylint: disable=import-error # pylint: enable=unused-import from salt.modules.win_pkg import _get_package_info from salt.modules.win_pkg import get_repo_data from salt.modules.win_pkg import _get_repo_details from salt.modules.win_pkg import _refresh_db_conditional from salt.modules.win_pkg import refresh_db from salt.modules.win_pkg import genrepo from salt.modules.win_pkg import _repo_process_pkg_sls from salt.modules.win_pkg import _get_latest_pkg_version from salt.modules.win_pkg import _reverse_cmp_pkg_versions _get_package_info = _namespaced_function(_get_package_info, globals()) get_repo_data = _namespaced_function(get_repo_data, globals()) _get_repo_details = \ _namespaced_function(_get_repo_details, globals()) 
def __virtual__():
    '''
    Only expose these states when a pkg provider has been detected or
    assigned for this minion.
    '''
    return 'pkg.install' in __salt__


def _get_comparison_spec(pkgver):
    '''
    Split ``pkgver`` into an (operator, version) pair. A bare version, or one
    prefixed with a single "=", is normalized to an equality ("==")
    comparison.
    '''
    operator, version = salt.utils.pkg.split_comparison(pkgver.strip())
    if operator in ('=', ''):
        return '==', version
    return operator, version


def _parse_version_string(version_conditions_string):
    '''
    Parse a comma-separated string of version conditions into a list of
    (operator, version) two-tuples. Blank input yields an empty list.
    '''
    stripped = version_conditions_string.strip()
    if not stripped:
        return []
    return [_get_comparison_spec(condition)
            for condition in stripped.split(',')]


def _fulfills_version_string(installed_versions,
                             version_conditions_string,
                             ignore_epoch=False,
                             allow_updates=False):
    '''
    Return True when any single installed version satisfies *every* condition
    in ``version_conditions_string``, otherwise False.

    installed_versions
        The currently-installed versions to check.

    version_conditions_string
        The string containing all version conditions, e.g. ``1.2.3-4``,
        ``>=1.2.3-4``, or ``>=1.2.3-4, <2.3.4-5, !=1.2.4-1``.

    ignore_epoch : False
        When a package version contains a non-zero epoch (e.g.
        ``1:3.14.159-2.el7``) and a specific version of a package is desired,
        set this option to ``True`` to ignore the epoch when comparing
        versions.

    allow_updates : False
        Allow the package to be updated outside Salt's control (e.g. auto
        updates on Windows). Only applicable when a single strict ("==")
        condition is specified: the comparison is widened to ">=".
    '''
    conditions = _parse_version_string(version_conditions_string)
    # A lone "==" condition is widened to ">=" when updates are allowed.
    widen = allow_updates and len(conditions) == 1
    for candidate in installed_versions:
        if all(_fulfills_version_spec([candidate],
                                      '>=' if widen and oper == '==' else oper,
                                      wanted,
                                      ignore_epoch=ignore_epoch)
               for oper, wanted in conditions):
            return True
    return False


def _fulfills_version_spec(versions, oper, desired_version,
                           ignore_epoch=False):
    '''
    Return True when any entry in ``versions`` satisfies the single
    comparison described by ``oper`` and ``desired_version``. For "=="
    comparisons, shell-style wildcards (fnmatch) are honored as well.
    '''
    version_cmp = __salt__.get('pkg.version_cmp')
    # On FreeBSD the versions may arrive wrapped in a "with_origin" dict;
    # unwrap to the plain version list before comparing.
    if salt.utils.platform.is_freebsd():
        if isinstance(versions, dict) and 'version' in versions:
            versions = versions['version']

    def _matches(installed):
        if oper == '==' and fnmatch.fnmatch(installed, desired_version):
            return True
        return salt.utils.versions.compare(ver1=installed,
                                           oper=oper,
                                           ver2=desired_version,
                                           cmp_func=version_cmp,
                                           ignore_epoch=ignore_epoch)

    return any(_matches(ver) for ver in versions)
def _find_unpurge_targets(desired, **kwargs):
    '''
    Find packages which are marked to be purged but can't yet be removed
    because they are dependencies for other installed packages. These are the
    packages which will need to be 'unpurged' because they are part of
    pkg.installed states.

    This really just applies to Debian-based Linuxes.
    '''
    # Query the package database once instead of once per desired package
    # (the pkg.list_pkgs call was previously inside the comprehension's
    # condition and therefore executed for every element of ``desired``).
    purge_desired = __salt__['pkg.list_pkgs'](purge_desired=True, **kwargs)
    return [x for x in desired if x in purge_desired]


def _find_download_targets(name=None,
                           version=None,
                           pkgs=None,
                           normalize=True,
                           skip_suggestions=False,
                           ignore_epoch=False,
                           **kwargs):
    '''
    Inspect the arguments to pkg.downloaded and discover what packages need to
    be downloaded.

    Returns either a dict mapping package names to the versions that still
    need downloading, or a complete state-style result dict (with ``name``,
    ``changes``, ``result`` and ``comment`` keys) when there is nothing to do
    or an error occurred.
    '''
    cur_pkgs = __salt__['pkg.list_downloaded'](**kwargs)
    if pkgs:
        to_download = _repack_pkgs(pkgs, normalize=normalize)

        if not to_download:
            # Badly-formatted SLS
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'Invalidly formatted pkgs parameter. See '
                               'minion log.'}
    else:
        # Single-package form: normalize the name if the provider supports it.
        if normalize:
            _normalize_name = \
                __salt__.get('pkg.normalize_name', lambda pkgname: pkgname)
            to_download = {_normalize_name(name): version}
        else:
            to_download = {name: version}

        cver = cur_pkgs.get(name, {})
        if name in to_download:
            # Package already downloaded, no need to download again
            if cver and version in cver:
                return {'name': name,
                        'changes': {},
                        'result': True,
                        'comment': 'Version {0} of package \'{1}\' is already '
                                   'downloaded'.format(version, name)}

            # if cver is not an empty string, the package is already downloaded
            elif cver and version is None:
                # The package is downloaded
                return {'name': name,
                        'changes': {},
                        'result': True,
                        'comment': 'Package {0} is already '
                                   'downloaded'.format(name)}

    version_spec = False
    if not skip_suggestions:
        # Perform platform-specific pre-flight checks; a failed check is
        # treated as best-effort (errors are swallowed).
        try:
            problems = _preflight_check(to_download, **kwargs)
        except CommandExecutionError:
            pass
        else:
            comments = []
            if problems.get('no_suggest'):
                comments.append(
                    'The following package(s) were not found, and no '
                    'possible matches were found in the package db: '
                    '{0}'.format(
                        ', '.join(sorted(problems['no_suggest']))
                    )
                )
            if problems.get('suggest'):
                for pkgname, suggestions in \
                        six.iteritems(problems['suggest']):
                    comments.append(
                        'Package \'{0}\' not found (possible matches: '
                        '{1})'.format(pkgname, ', '.join(suggestions))
                    )
            if comments:
                if len(comments) > 1:
                    comments.append('')
                return {'name': name,
                        'changes': {},
                        'result': False,
                        'comment': '. '.join(comments).rstrip()}

    # Find out which packages will be targeted in the call to pkg.download
    # Check current downloaded versions against specified versions
    targets = {}
    problems = []
    for pkgname, pkgver in six.iteritems(to_download):
        cver = cur_pkgs.get(pkgname, {})
        # Package not yet downloaded, so add to targets
        if not cver:
            targets[pkgname] = pkgver
            continue
        # No version specified but package is already downloaded
        elif cver and not pkgver:
            continue

        version_spec = True
        try:
            # cver maps downloaded version strings to paths; compare its keys.
            if not _fulfills_version_string(cver.keys(), pkgver,
                                            ignore_epoch=ignore_epoch):
                targets[pkgname] = pkgver
        except CommandExecutionError as exc:
            problems.append(exc.strerror)
            continue

    if problems:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': ' '.join(problems)}

    if not targets:
        # All specified packages are already downloaded
        msg = (
            'All specified packages{0} are already downloaded'
            .format(' (matching specified versions)' if version_spec else '')
        )
        return {'name': name,
                'changes': {},
                'result': True,
                'comment': msg}

    return targets
def _find_advisory_targets(name=None,
                           advisory_ids=None,
                           **kwargs):
    '''
    Inspect the arguments to pkg.patch_installed and discover what advisory
    patches need to be installed.

    Returns a list of advisory ids still needing installation, or a
    state-style result dict (``name``/``changes``/``result``/``comment``)
    when there is nothing to do.
    '''
    cur_patches = __salt__['pkg.list_installed_patches'](**kwargs)
    if advisory_ids:
        to_download = advisory_ids
    else:
        # Single-advisory form: the state name itself is the advisory id.
        to_download = [name]
        if cur_patches.get(name, {}):
            # Advisory patch already installed, no need to install it again
            return {'name': name,
                    'changes': {},
                    'result': True,
                    'comment': 'Advisory patch {0} is already '
                               'installed'.format(name)}

    # Find out which advisory patches will be targeted in the call to pkg.install
    targets = []
    for patch_name in to_download:
        cver = cur_patches.get(patch_name, {})
        # Advisory patch not yet installed, so add to targets
        if not cver:
            targets.append(patch_name)
            continue

    if not targets:
        # All specified packages are already downloaded
        msg = ('All specified advisory patches are already installed')
        return {'name': name,
                'changes': {},
                'result': True,
                'comment': msg}

    return targets


def _find_remove_targets(name=None,
                         version=None,
                         pkgs=None,
                         normalize=True,
                         ignore_epoch=False,
                         **kwargs):
    '''
    Inspect the arguments to pkg.removed and discover what packages need to
    be removed.

    Returns a list of installed package names to remove, or a state-style
    result dict (``name``/``changes``/``result``/``comment``) when there is
    nothing to do or an error occurred.
    '''
    # FreeBSD needs the origin info so origin-style names can be matched.
    if __grains__['os'] == 'FreeBSD':
        kwargs['with_origin'] = True
    cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
    if pkgs:
        to_remove = _repack_pkgs(pkgs, normalize=normalize)

        if not to_remove:
            # Badly-formatted SLS
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'Invalidly formatted pkgs parameter. See '
                               'minion log.'}
    else:
        # Single-package form: normalize the name if the provider supports it.
        _normalize_name = \
            __salt__.get('pkg.normalize_name', lambda pkgname: pkgname)
        to_remove = {_normalize_name(name): version}

    version_spec = False
    # Find out which packages will be targeted in the call to pkg.remove
    # Check current versions against specified versions
    targets = []
    problems = []
    for pkgname, pkgver in six.iteritems(to_remove):
        # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names
        origin = bool(re.search('/', pkgname))

        if __grains__['os'] == 'FreeBSD' and origin:
            # Match by origin rather than by package name.
            cver = [k for k, v in six.iteritems(cur_pkgs)
                    if v['origin'] == pkgname]
        else:
            cver = cur_pkgs.get(pkgname, [])

        # Package not installed, no need to remove
        if not cver:
            continue
        # No version specified and pkg is installed
        elif __salt__['pkg_resource.version_clean'](pkgver) is None:
            targets.append(pkgname)
            continue
        version_spec = True
        try:
            # Only remove if an installed version matches the requested spec.
            if _fulfills_version_string(cver, pkgver,
                                        ignore_epoch=ignore_epoch):
                targets.append(pkgname)
            else:
                log.debug(
                    'Current version (%s) did not match desired version '
                    'specification (%s), will not remove', cver, pkgver
                )
        except CommandExecutionError as exc:
            problems.append(exc.strerror)
            continue

    if problems:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': ' '.join(problems)}

    if not targets:
        # All specified packages are already absent
        msg = 'All specified packages{0} are already absent'.format(
            ' (matching specified versions)' if version_spec else ''
        )
        return {'name': name,
                'changes': {},
                'result': True,
                'comment': msg}

    return targets
def _find_install_targets(name=None,
                          version=None,
                          pkgs=None,
                          sources=None,
                          skip_suggestions=False,
                          pkg_verify=False,
                          normalize=True,
                          ignore_epoch=False,
                          reinstall=False,
                          refresh=False,
                          **kwargs):
    '''
    Inspect the arguments to pkg.installed and discover what packages need to
    be installed.

    Returns either a state-style result dict (``name``/``changes``/``result``
    /``comment``) when there is nothing to do or an error occurred, or a
    7-tuple of (desired, targets, to_unpurge, to_reinstall, altered_files,
    warnings, was_refreshed).
    '''
    was_refreshed = False

    if all((pkgs, sources)):
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'Only one of "pkgs" and "sources" is permitted.'}

    # dict for packages that fail pkg.verify and their altered files
    altered_files = {}
    # Get the ignore_types list if any from the pkg_verify argument
    if isinstance(pkg_verify, list) \
            and any(x.get('ignore_types') is not None
                    for x in pkg_verify
                    if isinstance(x, _OrderedDict)
                    and 'ignore_types' in x):
        ignore_types = next(x.get('ignore_types')
                            for x in pkg_verify
                            if 'ignore_types' in x)
    else:
        ignore_types = []

    # Get the verify_options list if any from the pkg_verify argument
    if isinstance(pkg_verify, list) \
            and any(x.get('verify_options') is not None
                    for x in pkg_verify
                    if isinstance(x, _OrderedDict)
                    and 'verify_options' in x):
        verify_options = next(x.get('verify_options')
                              for x in pkg_verify
                              if 'verify_options' in x)
    else:
        verify_options = []

    # FreeBSD needs origin info so origin-style names can be matched.
    if __grains__['os'] == 'FreeBSD':
        kwargs['with_origin'] = True

    if salt.utils.platform.is_windows():
        # Windows requires a refresh to establish a pkg db if refresh=True, so
        # add it to the kwargs.
        kwargs['refresh'] = refresh

    resolve_capabilities = kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__
    try:
        cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
        cur_prov = resolve_capabilities and __salt__['pkg.list_provides'](**kwargs) or dict()
    except CommandExecutionError as exc:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': exc.strerror}

    if salt.utils.platform.is_windows() and kwargs.pop('refresh', False):
        # We already refreshed when we called pkg.list_pkgs
        was_refreshed = True
        refresh = False

    if any((pkgs, sources)):
        if pkgs:
            desired = _repack_pkgs(pkgs, normalize=normalize)
        elif sources:
            desired = __salt__['pkg_resource.pack_sources'](
                sources,
                normalize=normalize,
            )

        if not desired:
            # Badly-formatted SLS
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'Invalidly formatted \'{0}\' parameter. See '
                               'minion log.'.format('pkgs' if pkgs
                                                    else 'sources')}
        to_unpurge = _find_unpurge_targets(desired, **kwargs)
    else:
        if salt.utils.platform.is_windows():
            # Resolve the package against the Windows software repo.
            pkginfo = _get_package_info(name, saltenv=kwargs['saltenv'])
            if not pkginfo:
                return {'name': name,
                        'changes': {},
                        'result': False,
                        'comment': 'Package {0} not found in the '
                                   'repository.'.format(name)}
            if version is None:
                version = _get_latest_pkg_version(pkginfo)

        if normalize:
            _normalize_name = \
                __salt__.get('pkg.normalize_name', lambda pkgname: pkgname)
            desired = {_normalize_name(name): version}
        else:
            desired = {name: version}

        to_unpurge = _find_unpurge_targets(desired, **kwargs)

        # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names
        origin = bool(re.search('/', name))

        if __grains__['os'] == 'FreeBSD' and origin:
            cver = [k for k, v in six.iteritems(cur_pkgs)
                    if v['origin'] == name]
        else:
            cver = cur_pkgs.get(name, [])

        if name not in to_unpurge:
            if version and version in cver \
                    and not reinstall \
                    and not pkg_verify:
                # The package is installed and is the correct version
                return {'name': name,
                        'changes': {},
                        'result': True,
                        'comment': 'Version {0} of package \'{1}\' is already '
                                   'installed'.format(version, name)}

            # if cver is not an empty string, the package is already installed
            elif cver and version is None \
                    and not reinstall \
                    and not pkg_verify:
                # The package is installed
                return {'name': name,
                        'changes': {},
                        'result': True,
                        'comment': 'Package {0} is already '
                                   'installed'.format(name)}

    version_spec = False
    if not sources:
        # Check for alternate package names if strict processing is not
        # enforced. Takes extra time. Disable for improved performance
        if not skip_suggestions:
            # Perform platform-specific pre-flight checks
            not_installed = dict([
                (name, version)
                for name, version in desired.items()
                if not (name in cur_pkgs and
                        (version is None or
                         _fulfills_version_string(cur_pkgs[name], version)))
            ])
            if not_installed:
                try:
                    problems = _preflight_check(not_installed, **kwargs)
                except CommandExecutionError:
                    pass
                else:
                    comments = []
                    if problems.get('no_suggest'):
                        comments.append(
                            'The following package(s) were not found, and no '
                            'possible matches were found in the package db: '
                            '{0}'.format(
                                ', '.join(sorted(problems['no_suggest']))
                            )
                        )
                    if problems.get('suggest'):
                        for pkgname, suggestions in \
                                six.iteritems(problems['suggest']):
                            comments.append(
                                'Package \'{0}\' not found (possible matches: '
                                '{1})'.format(pkgname, ', '.join(suggestions))
                            )
                    if comments:
                        if len(comments) > 1:
                            comments.append('')
                        return {'name': name,
                                'changes': {},
                                'result': False,
                                'comment': '. '.join(comments).rstrip()}

    # Resolve the latest package version for any packages with "latest" in the
    # package version
    wants_latest = [] \
        if sources \
        else [x for x, y in six.iteritems(desired) if y == 'latest']
    if wants_latest:
        resolved_latest = __salt__['pkg.latest_version'](*wants_latest,
                                                         refresh=refresh,
                                                         **kwargs)
        if len(wants_latest) == 1:
            # pkg.latest_version returns a bare string for a single package.
            resolved_latest = {wants_latest[0]: resolved_latest}
        if refresh:
            was_refreshed = True
            refresh = False

        # pkg.latest_version returns an empty string when the package is
        # up-to-date. So check the currently-installed packages. If found, the
        # resolved latest version will be the currently installed one from
        # cur_pkgs. If not found, then the package doesn't exist and the
        # resolved latest version will be None.
        for key in resolved_latest:
            if not resolved_latest[key]:
                if key in cur_pkgs:
                    resolved_latest[key] = cur_pkgs[key][-1]
                else:
                    resolved_latest[key] = None
        # Update the desired versions with the ones we resolved
        desired.update(resolved_latest)

    # Find out which packages will be targeted in the call to pkg.install
    targets = {}
    to_reinstall = {}
    problems = []
    warnings = []
    failed_verify = False
    for package_name, version_string in six.iteritems(desired):
        cver = cur_pkgs.get(package_name, [])
        if resolve_capabilities and not cver and package_name in cur_prov:
            # Fall back to the package providing this capability.
            cver = cur_pkgs.get(cur_prov.get(package_name)[0], [])

        # Package not yet installed, so add to targets
        if not cver:
            targets[package_name] = version_string
            continue

        if sources:
            if reinstall:
                to_reinstall[package_name] = version_string
                continue
            elif 'lowpkg.bin_pkg_info' not in __salt__:
                continue
            # Metadata parser is available, cache the file and derive the
            # package's name and version
            err = 'Unable to cache {0}: {1}'
            try:
                cached_path = __salt__['cp.cache_file'](version_string,
                                                        saltenv=kwargs['saltenv'])
            except CommandExecutionError as exc:
                problems.append(err.format(version_string, exc))
                continue
            if not cached_path:
                problems.append(err.format(version_string, 'file not found'))
                continue
            elif not os.path.exists(cached_path):
                problems.append('{0} does not exist on minion'.format(version_string))
                continue
            source_info = __salt__['lowpkg.bin_pkg_info'](cached_path)
            if source_info is None:
                warnings.append('Failed to parse metadata for {0}'.format(version_string))
                continue
            else:
                verstr = source_info['version']
        else:
            verstr = version_string
            if reinstall:
                to_reinstall[package_name] = version_string
                continue
            if not __salt__['pkg_resource.check_extra_requirements'](package_name, version_string):
                targets[package_name] = version_string
                continue
            # No version specified and pkg is installed
            elif __salt__['pkg_resource.version_clean'](version_string) is None:
                if (not reinstall) and pkg_verify:
                    # Verify on-disk files; mismatches trigger a reinstall.
                    try:
                        verify_result = __salt__['pkg.verify'](
                            package_name,
                            ignore_types=ignore_types,
                            verify_options=verify_options,
                            **kwargs
                        )
                    except (CommandExecutionError, SaltInvocationError) as exc:
                        failed_verify = exc.strerror
                        continue
                    if verify_result:
                        to_reinstall[package_name] = version_string
                        altered_files[package_name] = verify_result
                continue

        version_fulfilled = False
        allow_updates = bool(not sources and kwargs.get('allow_updates'))
        try:
            version_fulfilled = _fulfills_version_string(cver, verstr, ignore_epoch=ignore_epoch, allow_updates=allow_updates)
        except CommandExecutionError as exc:
            problems.append(exc.strerror)
            continue

        # Compare desired version against installed version.
        version_spec = True
        if not version_fulfilled:
            if reinstall:
                to_reinstall[package_name] = version_string
            else:
                version_conditions = _parse_version_string(version_string)
                if pkg_verify and any(oper == '==' for oper, version in version_conditions):
                    try:
                        verify_result = __salt__['pkg.verify'](
                            package_name,
                            ignore_types=ignore_types,
                            verify_options=verify_options,
                            **kwargs)
                    except (CommandExecutionError, SaltInvocationError) as exc:
                        failed_verify = exc.strerror
                        continue
                    if verify_result:
                        to_reinstall[package_name] = version_string
                        altered_files[package_name] = verify_result
                else:
                    log.debug(
                        'Current version (%s) did not match desired version '
                        'specification (%s), adding to installation targets',
                        cver, version_string
                    )
                    targets[package_name] = version_string

    if failed_verify:
        problems.append(failed_verify)

    if problems:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': ' '.join(problems)}

    if not any((targets, to_unpurge, to_reinstall)):
        # All specified packages are installed
        msg = 'All specified packages are already installed{0}'
        msg = msg.format(' and are at the desired version' if version_spec and not sources else '')
        ret = {'name': name,
               'changes': {},
               'result': True,
               'comment': msg}
        if warnings:
            ret.setdefault('warnings', []).extend(warnings)
        return ret

    return (desired, targets, to_unpurge, to_reinstall, altered_files,
            warnings, was_refreshed)
def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None):
    '''
    Check the freshly-listed packages against what the SLS requested, and
    split the requested package names into an (ok, failed) pair of lists.
    '''
    new_caps = new_caps or dict()
    ok = []
    failed = []

    def _installed_versions(pkgname, has_origin):
        # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names.
        # Homebrew for Mac OSX does something similar with tap names
        # prefixing package names, separated with a slash.
        if __grains__['os'] == 'FreeBSD' and has_origin:
            return [k for k, v in six.iteritems(new_pkgs)
                    if v['origin'] == pkgname]
        if __grains__['os'] == 'MacOS' and has_origin:
            return new_pkgs.get(pkgname,
                                new_pkgs.get(pkgname.split('/')[-1]))
        if __grains__['os'] == 'OpenBSD':
            return new_pkgs.get(pkgname.split('%')[0])
        if __grains__['os_family'] == 'Debian':
            return new_pkgs.get(pkgname.split('=')[0])
        return new_pkgs.get(pkgname)

    for pkgname, pkgver in desired.items():
        cver = _installed_versions(pkgname, '/' in pkgname)
        if not cver and pkgname in new_caps:
            # Fall back to the package that provides this capability.
            cver = new_pkgs.get(new_caps.get(pkgname)[0])

        if not cver:
            failed.append(pkgname)
        elif pkgver == 'latest':
            # 'latest' was already resolved before install; presence is enough.
            ok.append(pkgname)
        elif not __salt__['pkg_resource.version_clean'](pkgver):
            # No concrete version requested; presence is enough.
            ok.append(pkgname)
        elif pkgver.endswith("*") and cver[0].startswith(pkgver[:-1]):
            # Wildcard version matched by simple prefix.
            ok.append(pkgname)
        elif _fulfills_version_string(cver, pkgver,
                                      ignore_epoch=ignore_epoch):
            ok.append(pkgname)
        else:
            failed.append(pkgname)
    return ok, failed
''' if not desired[name] or desired[name].startswith(('<', '>', '=')): oper = '' else: oper = '=' return '{0}{1}{2}'.format(name, oper, '' if not desired[name] else desired[name]) def _preflight_check(desired, fromrepo, **kwargs): ''' Perform platform-specific checks on desired packages ''' if 'pkg.check_db' not in __salt__: return {} ret = {'suggest': {}, 'no_suggest': []} pkginfo = __salt__['pkg.check_db']( *list(desired.keys()), fromrepo=fromrepo, **kwargs ) for pkgname in pkginfo: if pkginfo[pkgname]['found'] is False: if pkginfo[pkgname]['suggestions']: ret['suggest'][pkgname] = pkginfo[pkgname]['suggestions'] else: ret['no_suggest'].append(pkgname) return ret def _nested_output(obj): ''' Serialize obj and format for output ''' nested.__opts__ = __opts__ ret = nested.output(obj).rstrip() return ret def _resolve_capabilities(pkgs, refresh=False, **kwargs): ''' Resolve capabilities in ``pkgs`` and exchange them with real package names, when the result is distinct. This feature can be turned on while setting the paramter ``resolve_capabilities`` to True. Return the input dictionary with replaced capability names and as second return value a bool which say if a refresh need to be run. In case of ``resolve_capabilities`` is False (disabled) or not supported by the implementation the input is returned unchanged. ''' if not pkgs or 'pkg.resolve_capabilities' not in __salt__: return pkgs, refresh ret = __salt__['pkg.resolve_capabilities'](pkgs, refresh=refresh, **kwargs) return ret, False def installed( name, version=None, refresh=None, fromrepo=None, skip_verify=False, skip_suggestions=False, pkgs=None, sources=None, allow_updates=False, pkg_verify=False, normalize=True, ignore_epoch=False, reinstall=False, update_holds=False, bypass_file=None, bypass_file_contains=None, **kwargs): ''' Ensure that the package is installed, and that it is the correct version (if specified). :param str name: The name of the package to be installed. 
This parameter is ignored if either "pkgs" or "sources" is used. Additionally, please note that this option can only be used to install packages from a software repository. To install a package file manually, use the "sources" option detailed below. :param str version: Install a specific version of a package. This option is ignored if "sources" is used. Currently, this option is supported for the following pkg providers: :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`pkgin <salt.modules.pkgin>`, :mod:`win_pkg <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`. The version number includes the release designation where applicable, to allow Salt to target a specific release of a given version. When in doubt, using the ``pkg.latest_version`` function for an uninstalled package will tell you the version available. .. code-block:: bash # salt myminion pkg.latest_version vim-enhanced myminion: 2:7.4.160-1.el7 .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the ``pkg.latest_version`` output above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. 
Also, while this function is not yet implemented for all pkg frontends, :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` will show all versions available in the various repositories for a given package, irrespective of whether or not it is installed. .. code-block:: bash # salt myminion pkg.list_repo_pkgs bash myminion: ---------- bash: - 4.2.46-21.el7_3 - 4.2.46-20.el7_2 This function was first added for :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` in 2014.1.0, and was expanded to :py:func:`Debian/Ubuntu <salt.modules.aptpkg.list_repo_pkgs>` and :py:func:`Arch Linux <salt.modules.pacman.list_repo_pkgs>`-based distros in the 2017.7.0 release. The version strings returned by either of these functions can be used as version specifiers in pkg states. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.installed: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 If the version given is the string ``latest``, the latest available package version will be installed à la ``pkg.latest``. **WILDCARD VERSIONS** As of the 2017.7.0 release, this state now supports wildcards in package versions for SUSE SLES/Leap/Tumbleweed, Debian/Ubuntu, RHEL/CentOS, Arch Linux, and their derivatives. Using wildcards can be useful for packages where the release name is built into the version in some way, such as for RHEL/CentOS which typically has version numbers like ``1.2.34-5.el7``. An example of the usage for this would be: .. code-block:: yaml mypkg: pkg.installed: - version: '1.2.34*' Keep in mind that using wildcard versions will result in a slower state run since Salt must gather the available versions of the specified packages and figure out which of them match the specified wildcard expression. :param bool refresh: This parameter controls whether or not the package repo database is updated prior to installing the requested package(s). 
If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before installing. If ``False``, the package database will *not* be refreshed before installing. If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - cache_valid_time: 300 - allow_updates: True - hold: False In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param str fromrepo: Specify a repository from which to install .. note:: Distros which use APT (Debian, Ubuntu, etc.) do not have a concept of repositories, in the same way as YUM-based distros do. When a source is added, it is assigned to a given release. Consider the following source configuration: .. code-block:: text deb http://ppa.launchpad.net/saltstack/salt/ubuntu precise main The packages provided by this source would be made available via the ``precise`` release, therefore ``fromrepo`` would need to be set to ``precise`` for Salt to install the package from this source. 
Having multiple sources in the same release may result in the default install candidate being newer than what is desired. If this is the case, the desired version must be specified using the ``version`` parameter. If the ``pkgs`` parameter is being used to install multiple packages in the same state, then instead of using ``version``, use the method of version specification described in the **Multiple Package Installation Options** section below. Running the shell command ``apt-cache policy pkgname`` on a minion can help elucidate the APT configuration and aid in properly configuring states: .. code-block:: bash root@saltmaster:~# salt ubuntu01 cmd.run 'apt-cache policy ffmpeg' ubuntu01: ffmpeg: Installed: (none) Candidate: 7:0.10.11-1~precise1 Version table: 7:0.10.11-1~precise1 0 500 http://ppa.launchpad.net/jon-severinsson/ffmpeg/ubuntu/ precise/main amd64 Packages 4:0.8.10-0ubuntu0.12.04.1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise-updates/main amd64 Packages 500 http://security.ubuntu.com/ubuntu/ precise-security/main amd64 Packages 4:0.8.1-0ubuntu1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise/main amd64 Packages The release is located directly after the source's URL. The actual release name is the part before the slash, so to install version **4:0.8.10-0ubuntu0.12.04.1** either ``precise-updates`` or ``precise-security`` could be used for the ``fromrepo`` value. :param bool skip_verify: Skip the GPG verification check for the package to be installed :param bool skip_suggestions: Force strict package naming. Disables lookup of package alternatives. .. versionadded:: 2014.1.1 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 :param bool allow_updates: Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). 
This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - allow_updates: True - hold: False :param bool pkg_verify: .. versionadded:: 2014.7.0 For requested packages that are already installed and would not be targeted for upgrade or downgrade, use pkg.verify to determine if any of the files installed by the package have been altered. If files have been altered, the reinstall option of pkg.install is used to force a reinstall. Types to ignore can be passed to pkg.verify. Additionally, ``verify_options`` can be used to modify further the behavior of pkg.verify. See examples below. Currently, this option is supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>`. Examples: .. code-block:: yaml httpd: pkg.installed: - version: 2.2.15-30.el6.centos - pkg_verify: True .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc - verify_options: - nodeps - nofiledigest :param list ignore_types: List of types to ignore when verifying the package .. versionadded:: 2014.7.0 :param list verify_options: List of additional options to pass when verifying the package. These options will be added to the ``rpm -V`` command, prepended with ``--`` (for example, when ``nodeps`` is passed in this option, ``rpm -V`` will be run with ``--nodeps``). .. versionadded:: 2016.11.0 :param bool normalize: Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml gpfs.gplbin-2.6.32-279.31.1.el6.x86_64: pkg.installed: - normalize: False :param bool ignore_epoch: When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.installed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the package would be installed, but the state would report as failed because the actual installed version would be ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 | **MULTIPLE PACKAGE INSTALLATION OPTIONS: (not supported in pkgng)** :param list pkgs: A list of packages to install from a software repository. All packages listed under ``pkgs`` will be installed via a single command. .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar - baz - hold: True ``NOTE:`` For :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`winrepo <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`, version numbers can be specified in the ``pkgs`` argument. For example: .. 
code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz Additionally, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`zypper <salt.modules.zypper>`, :mod:`yum/dnf <salt.modules.yumpkg>`, and :mod:`apt <salt.modules.aptpkg>` support the ``<``, ``<=``, ``>=``, and ``>`` operators for more control over what versions will be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: '>=1.2.3-4' - baz ``NOTE:`` When using comparison operators, the expression must be enclosed in quotes to avoid a YAML render error. With :mod:`ebuild <salt.modules.ebuild>` is also possible to specify a use flag list and/or if the given packages should be in package.accept_keywords file and/or the overlay from which you want the package to be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo: '~' - bar: '~>=1.2:slot::overlay[use,-otheruse]' - baz :param list sources: A list of packages to install, along with the source URI or local path from which to install each package. In the example below, ``foo``, ``bar``, ``baz``, etc. refer to the name of the package, as it would appear in the output of the ``pkg.version`` or ``pkg.list_pkgs`` salt CLI commands. .. code-block:: yaml mypkgs: pkg.installed: - sources: - foo: salt://rpms/foo.rpm - bar: http://somesite.org/bar.rpm - baz: ftp://someothersite.org/baz.rpm - qux: /minion/path/to/qux.rpm **PLATFORM-SPECIFIC ARGUMENTS** These are specific to each OS. If it does not apply to the execution module for your OS, it is ignored. :param bool hold: Force the package to be held at the current installed version. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2014.7.0 :param bool update_holds: If ``True``, and this function would update the package version, any packages which are being held will be temporarily unheld so that they can be updated. 
Otherwise, if this function attempts to update a held package, the held package(s) will be skipped and the state will fail. By default, this parameter is set to ``False``. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2016.11.0 :param list names: A list of packages to install from a software repository. Each package will be installed individually by the package manager. .. warning:: Unlike ``pkgs``, the ``names`` parameter cannot specify a version. In addition, it makes a separate call to the package management frontend to install each package, whereas ``pkgs`` makes just a single call. It is therefore recommended to use ``pkgs`` instead of ``names`` to install multiple packages, both for the additional features and the performance improvement that it brings. :param bool install_recommends: Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - install_recommends: False :param bool only_upgrade: Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. :param bool report_reboot_exit_codes: If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. 
code-block:: yaml ms vcpp installed: pkg.installed: - name: ms-vcpp - version: 10.0.40219 - report_reboot_exit_codes: False :param str bypass_file: If you wish to bypass the full package validation process, you can specify a file related to the installed package as a way to validate the package has already been installed. A good example would be a config file that is deployed with the package. Another bypass_file could be ``/run/salt-minion.pid``. .. code-block:: yaml install_ntp: pkg.installed: - name: ntp - bypass_file: /etc/ntp.conf The use case for this feature is when running salt at significant scale. Each state that has a requisite for a ``pkg.installed`` will have salt querying the package manager of the system. Compared to simple diff checks, querying the package manager is a lengthy process. This feature is an attempt to reduce the run time of states. If only a config change is being made but you wish to keep all of the self resolving requisites this bypasses the lengthy cost of the package manager. The assumption is that if this file is present, the package should already be installed. :param str bypass_file_contains: This option can only be used in conjunction with the ``bypass_file`` option. It is to provide a second layer of validation before bypassing the ``pkg.installed`` process. .. code-block:: yaml install_ntp: pkg.installed: - name: ntp - bypass_file: /etc/ntp.conf - bypass_file_contains: version-20181218 This will have Salt check to see if the file contains the specified string. If the value is found, the ``pkg.installed`` process will be bypassed under the assumption that two pieces of validation have passed and the package is already installed. .. warning:: Do not try and use ``{{ salt['pkg.version']('ntp') }}`` in a jinja template as part of your bypass_file_contains match. This will trigger a ``pkg.version`` lookup with the package manager and negate any time saved by trying to use the bypass feature. 
:return: A dictionary containing the state of the software installation :rtype dict: .. note:: The ``pkg.installed`` state supports the usage of ``reload_modules``. This functionality allows you to force Salt to reload all modules. In many cases, Salt is clever enough to transparently reload the modules. For example, if you install a package, Salt reloads modules because some other module or state might require the package which was installed. However, there are some edge cases where this may not be the case, which is what ``reload_modules`` is meant to resolve. You should only use ``reload_modules`` if your ``pkg.installed`` does some sort of installation where if you do not reload the modules future items in your state which rely on the software being installed will fail. Please see the :ref:`Reloading Modules <reloading-modules>` documentation for more information. ''' if not pkgs and isinstance(pkgs, list): return {'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided'} # If just a name (and optionally a version) is passed, just pack them into # the pkgs argument. 
if name and not any((pkgs, sources)): if version: pkgs = [{name: version}] version = None else: pkgs = [name] kwargs['saltenv'] = __env__ refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if bypass_file is not None and bypass_file_contains is not None: if os.path.isfile(bypass_file): with salt.utils.fopen(bypass_file) as bypass_file_open: if bypass_file_contains in bypass_file_open.read(): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as {} was present in {}'.format(bypass_file_contains, bypass_file)} if bypass_file is not None and bypass_file_contains is None: if os.path.isfile(bypass_file): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as bypass_file {} was present'.format(bypass_file)} # check if capabilities should be checked and modify the requested packages # accordingly. if pkgs: pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) if not isinstance(pkg_verify, list): pkg_verify = pkg_verify is True if (pkg_verify or isinstance(pkg_verify, list)) \ and 'pkg.verify' not in __salt__: return {'name': name, 'changes': {}, 'result': False, 'comment': 'pkg.verify not implemented'} if not isinstance(version, six.string_types) and version is not None: version = six.text_type(version) kwargs['allow_updates'] = allow_updates result = _find_install_targets(name, version, pkgs, sources, fromrepo=fromrepo, skip_suggestions=skip_suggestions, pkg_verify=pkg_verify, normalize=normalize, ignore_epoch=ignore_epoch, reinstall=reinstall, refresh=refresh, **kwargs) try: (desired, targets, to_unpurge, to_reinstall, altered_files, warnings, was_refreshed) = result if was_refreshed: refresh = False except ValueError: # _find_install_targets() found no targets or encountered an error # check that the hold function is available if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( 
name=name, pkgs=pkgs, sources=sources ) except (CommandExecutionError, SaltInvocationError) as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': six.text_type(exc)} if 'result' in hold_ret and not hold_ret['result']: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] for i in modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] result['changes'][i['name']] = i['changes'] for i in not_modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] for i in failed_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] return result if to_unpurge and 'lowpkg.unpurge' not in __salt__: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'lowpkg.unpurge not implemented'} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret # Remove any targets not returned by _find_install_targets if pkgs: pkgs = [dict([(x, y)]) for x, y in six.iteritems(targets)] pkgs.extend([dict([(x, y)]) for x, y in six.iteritems(to_reinstall)]) elif sources: oldsources = sources sources = [x for x in oldsources if next(iter(list(x.keys()))) in targets] sources.extend([x for x in oldsources if next(iter(list(x.keys()))) in to_reinstall]) comment = [] if __opts__['test']: if targets: if sources: summary = ', '.join(targets) else: summary = ', '.join([_get_desired_pkg(x, targets) for x in targets]) comment.append('The following packages would be ' 'installed/updated: {0}'.format(summary)) if to_unpurge: comment.append( 'The following packages would have their selection 
status ' 'changed from \'purge\' to \'install\': {0}' .format(', '.join(to_unpurge)) ) if to_reinstall: # Add a comment for each package in to_reinstall with its # pkg.verify output if reinstall: reinstall_targets = [] for reinstall_pkg in to_reinstall: if sources: reinstall_targets.append(reinstall_pkg) else: reinstall_targets.append( _get_desired_pkg(reinstall_pkg, to_reinstall) ) msg = 'The following packages would be reinstalled: ' msg += ', '.join(reinstall_targets) comment.append(msg) else: for reinstall_pkg in to_reinstall: if sources: pkgstr = reinstall_pkg else: pkgstr = _get_desired_pkg(reinstall_pkg, to_reinstall) comment.append( 'Package \'{0}\' would be reinstalled because the ' 'following files have been altered:'.format(pkgstr) ) comment.append( _nested_output(altered_files[reinstall_pkg]) ) ret = {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret changes = {'installed': {}} modified_hold = None not_modified_hold = None failed_hold = None if targets or to_reinstall: try: pkg_ret = __salt__['pkg.install'](name=None, refresh=refresh, version=version, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=pkgs, sources=sources, reinstall=bool(to_reinstall), normalize=normalize, update_holds=update_holds, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while installing ' 'package(s): {0}'.format(exc)) if warnings: ret.setdefault('warnings', []).extend(warnings) return ret if refresh: refresh = False if isinstance(pkg_ret, dict): changes['installed'].update(pkg_ret) elif isinstance(pkg_ret, six.string_types): comment.append(pkg_ret) # Code below will be looking for a dictionary. If this is a string # it means that there was an exception raised and that no packages # changed, so now that we have added this error to the comments we # set this to an empty dictionary so that the code below which # checks reinstall targets works. pkg_ret = {} if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( name=name, pkgs=desired ) except (CommandExecutionError, SaltInvocationError) as exc: comment.append(six.text_type(exc)) ret = {'name': name, 'changes': changes, 'result': False, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: if 'result' in hold_ret and not hold_ret['result']: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] if to_unpurge: changes['purge_desired'] = __salt__['lowpkg.unpurge'](*to_unpurge) # Analyze pkg.install results for packages in targets if sources: modified = [x for x in changes['installed'] if x in targets] not_modified = [x for x in desired if x not in targets 
and x not in to_reinstall] failed = [x for x in targets if x not in modified] else: if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True new_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__: new_caps = __salt__['pkg.list_provides'](**kwargs) else: new_caps = {} ok, failed = _verify_install(desired, new_pkgs, ignore_epoch=ignore_epoch, new_caps=new_caps) modified = [x for x in ok if x in targets] not_modified = [x for x in ok if x not in targets and x not in to_reinstall] failed = [x for x in failed if x in targets] # If there was nothing unpurged, just set the changes dict to the contents # of changes['installed']. if not changes.get('purge_desired'): changes = changes['installed'] if modified: if sources: summary = ', '.join(modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in modified]) if len(summary) < 20: comment.append('The following packages were installed/updated: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} installed/updated.'.format( len(modified), 's' if len(modified) > 1 else '', 'were' if len(modified) > 1 else 'was' ) ) if modified_hold: for i in modified_hold: change_name = i['name'] if change_name in changes: comment.append(i['comment']) if changes[change_name]['new']: changes[change_name]['new'] += '\n' changes[change_name]['new'] += '{0}'.format(i['changes']['new']) if changes[change_name]['old']: changes[change_name]['old'] += '\n' changes[change_name]['old'] += '{0}'.format(i['changes']['old']) else: comment.append(i['comment']) changes[change_name] = {} changes[change_name]['new'] = '{0}'.format(i['changes']['new']) # Any requested packages that were not targeted for install or reinstall if not_modified: if sources: summary = ', '.join(not_modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in not_modified]) if len(not_modified) <= 20: comment.append('The following 
packages were already installed: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} already installed'.format( len(not_modified), 's' if len(not_modified) > 1 else '', 'were' if len(not_modified) > 1 else 'was' ) ) if not_modified_hold: for i in not_modified_hold: comment.append(i['comment']) result = True if failed: if sources: summary = ', '.join(failed) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in failed]) comment.insert(0, 'The following packages failed to ' 'install/update: {0}'.format(summary)) result = False if failed_hold: for i in failed_hold: comment.append(i['comment']) result = False # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] # Rerun pkg.verify for packages in to_reinstall to determine failed modified = [] failed = [] for reinstall_pkg in to_reinstall: if reinstall: if reinstall_pkg in pkg_ret: modified.append(reinstall_pkg) else: failed.append(reinstall_pkg) elif pkg_verify: # No need to wrap this in a try/except because we would already # have caught invalid arguments earlier. 
verify_result = __salt__['pkg.verify'](reinstall_pkg, ignore_types=ignore_types, verify_options=verify_options, **kwargs) if verify_result: failed.append(reinstall_pkg) altered_files[reinstall_pkg] = verify_result else: modified.append(reinstall_pkg) if modified: # Add a comment for each package in modified with its pkg.verify output for modified_pkg in modified: if sources: pkgstr = modified_pkg else: pkgstr = _get_desired_pkg(modified_pkg, desired) msg = 'Package {0} was reinstalled.'.format(pkgstr) if modified_pkg in altered_files: msg += ' The following files were remediated:' comment.append(msg) comment.append(_nested_output(altered_files[modified_pkg])) else: comment.append(msg) if failed: # Add a comment for each package in failed with its pkg.verify output for failed_pkg in failed: if sources: pkgstr = failed_pkg else: pkgstr = _get_desired_pkg(failed_pkg, desired) msg = ('Reinstall was not successful for package {0}.' .format(pkgstr)) if failed_pkg in altered_files: msg += ' The following files could not be remediated:' comment.append(msg) comment.append(_nested_output(altered_files[failed_pkg])) else: comment.append(msg) result = False ret = {'name': name, 'changes': changes, 'result': result, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret def downloaded(name, version=None, pkgs=None, fromrepo=None, ignore_epoch=None, **kwargs): ''' .. versionadded:: 2017.7.0 Ensure that the package is downloaded, and that it is the correct version (if specified). Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` :param str name: The name of the package to be downloaded. This parameter is ignored if either "pkgs" is used. Additionally, please note that this option can only be used to download packages from a software repository. :param str version: Download a specific version of a package. .. 
important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.downloaded: - version: 2:7.4.160-1.el7 An **ignore_epoch** argument has been added to which causes the epoch to be disregarded when the state checks to see if the desired version was installed. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.downloaded: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: yaml zsh: pkg.downloaded: - version: 5.0.5-4.63 - fromrepo: "myrepository" ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} if 'pkg.list_downloaded' not in __salt__: ret['result'] = False ret['comment'] = 'The pkg.downloaded state is not available on ' \ 'this platform' return ret if not pkgs and isinstance(pkgs, list): ret['result'] = True ret['comment'] = 'No packages to download provided' return ret # If just a name (and optionally a version) is passed, just pack them into # the pkgs argument. if name and not pkgs: if version: pkgs = [{name: version}] version = None else: pkgs = [name] # It doesn't make sense here to received 'downloadonly' as kwargs # as we're explicitly passing 'downloadonly=True' to execution module. 
if 'downloadonly' in kwargs: del kwargs['downloadonly'] pkgs, _refresh = _resolve_capabilities(pkgs, **kwargs) # Only downloading not yet downloaded packages targets = _find_download_targets(name, version, pkgs, fromrepo=fromrepo, ignore_epoch=ignore_epoch, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, dict): ret['result'] = False ret['comment'] = 'An error was encountered while checking targets: ' \ '{0}'.format(targets) return ret if __opts__['test']: summary = ', '.join(targets) ret['comment'] = 'The following packages would be ' \ 'downloaded: {0}'.format(summary) return ret try: pkg_ret = __salt__['pkg.install'](name=name, pkgs=pkgs, version=version, downloadonly=True, fromrepo=fromrepo, ignore_epoch=ignore_epoch, **kwargs) ret['result'] = True ret['changes'].update(pkg_ret) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = 'An error was encountered while downloading ' \ 'package(s): {0}'.format(exc) return ret new_pkgs = __salt__['pkg.list_downloaded'](**kwargs) ok, failed = _verify_install(targets, new_pkgs, ignore_epoch=ignore_epoch) if failed: summary = ', '.join([_get_desired_pkg(x, targets) for x in failed]) ret['result'] = False ret['comment'] = 'The following packages failed to ' \ 'download: {0}'.format(summary) if not ret['changes'] and not ret['comment']: ret['result'] = True ret['comment'] = 'Packages are already downloaded: ' \ '{0}'.format(', '.join(targets)) return ret def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs): ''' .. versionadded:: 2017.7.0 Ensure that packages related to certain advisory ids are installed. 
Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` CLI Example: .. code-block:: yaml issue-foo-fixed: pkg.patch_installed: - advisory_ids: - SUSE-SLE-SERVER-12-SP2-2017-185 - SUSE-SLE-SERVER-12-SP2-2017-150 - SUSE-SLE-SERVER-12-SP2-2017-120 ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} if 'pkg.list_patches' not in __salt__: ret['result'] = False ret['comment'] = 'The pkg.patch_installed state is not available on ' \ 'this platform' return ret if not advisory_ids and isinstance(advisory_ids, list): ret['result'] = True ret['comment'] = 'No advisory ids provided' return ret # Only downloading not yet downloaded packages targets = _find_advisory_targets(name, advisory_ids, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, list): ret['result'] = False ret['comment'] = 'An error was encountered while checking targets: ' \ '{0}'.format(targets) return ret if __opts__['test']: summary = ', '.join(targets) ret['comment'] = 'The following advisory patches would be ' \ 'downloaded: {0}'.format(summary) return ret try: pkg_ret = __salt__['pkg.install'](name=name, advisory_ids=advisory_ids, downloadonly=downloadonly, **kwargs) ret['result'] = True ret['changes'].update(pkg_ret) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while downloading ' 'package(s): {0}'.format(exc)) return ret if not ret['changes'] and not ret['comment']: status = 'downloaded' if downloadonly else 'installed' ret['result'] = True ret['comment'] = ('Advisory patch is not needed or related packages ' 'are already {0}'.format(status)) return ret def patch_downloaded(name, advisory_ids=None, **kwargs): ''' .. versionadded:: 2017.7.0 Ensure that packages related to certain advisory ids are downloaded. Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` CLI Example: .. code-block:: yaml preparing-to-fix-issues: pkg.patch_downloaded: - advisory_ids: - SUSE-SLE-SERVER-12-SP2-2017-185 - SUSE-SLE-SERVER-12-SP2-2017-150 - SUSE-SLE-SERVER-12-SP2-2017-120 ''' if 'pkg.list_patches' not in __salt__: return {'name': name, 'result': False, 'changes': {}, 'comment': 'The pkg.patch_downloaded state is not available on ' 'this platform'} # It doesn't make sense here to received 'downloadonly' as kwargs # as we're explicitly passing 'downloadonly=True' to execution module. if 'downloadonly' in kwargs: del kwargs['downloadonly'] return patch_installed(name=name, advisory_ids=advisory_ids, downloadonly=True, **kwargs) def latest( name, refresh=None, fromrepo=None, skip_verify=False, pkgs=None, watch_flags=True, **kwargs): ''' Ensure that the named package is installed and the latest available package. If the package can be updated, this state function will update the package. Generally it is better for the :mod:`installed <salt.states.pkg.installed>` function to be used, as :mod:`latest <salt.states.pkg.latest>` will update the package whenever a new package is available. name The name of the package to maintain at the latest available version. This parameter is ignored if "pkgs" is used. 
fromrepo Specify a repository from which to install skip_verify Skip the GPG verification check for the package to be installed refresh This parameter controls whether or not the package repo database is updated prior to checking for the latest available version of the requested packages. If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before checking for the latest available version of the requested packages. If ``False``, the package database will *not* be refreshed before checking. If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.latest: - refresh: True - cache_valid_time: 300 In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 Multiple Package Installation Options: (Not yet supported for: FreeBSD, OpenBSD, MacOS, and Solaris pkgutil) pkgs A list of packages to maintain at the latest available version. .. 
code-block:: yaml mypkgs: pkg.latest: - pkgs: - foo - bar - baz install_recommends Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - install_recommends: False only_upgrade Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. report_reboot_exit_codes If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. code-block:: yaml ms vcpp installed: pkg.latest: - name: ms-vcpp - report_reboot_exit_codes: False ''' refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if kwargs.get('sources'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'The "sources" parameter is not supported.'} elif pkgs: desired_pkgs = list(_repack_pkgs(pkgs).keys()) if not desired_pkgs: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted "pkgs" parameter. See ' 'minion log.'} else: if not pkgs and isinstance(pkgs, list): return { 'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided' } else: desired_pkgs = [name] kwargs['saltenv'] = __env__ # check if capabilities should be checked and modify the requested packages # accordingly. 
desired_pkgs, refresh = _resolve_capabilities(desired_pkgs, refresh=refresh, **kwargs) try: avail = __salt__['pkg.latest_version'](*desired_pkgs, fromrepo=fromrepo, refresh=refresh, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking the ' 'newest available version of package(s): {0}' .format(exc)} try: cur = __salt__['pkg.version'](*desired_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': exc.strerror} # Repack the cur/avail data if only a single package is being checked if isinstance(cur, six.string_types): cur = {desired_pkgs[0]: cur} if isinstance(avail, six.string_types): avail = {desired_pkgs[0]: avail} targets = {} problems = [] for pkg in desired_pkgs: if not avail.get(pkg): # Package either a) is up-to-date, or b) does not exist if not cur.get(pkg): # Package does not exist msg = 'No information found for \'{0}\'.'.format(pkg) log.error(msg) problems.append(msg) elif watch_flags \ and __grains__.get('os_family') == 'Gentoo' \ and __salt__['portage_config.is_changed_uses'](pkg): # Package is up-to-date, but Gentoo USE flags are changing so # we need to add it to the targets targets[pkg] = cur[pkg] else: # Package either a) is not installed, or b) is installed and has an # upgrade available targets[pkg] = avail[pkg] if problems: return { 'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems) } if targets: # Find up-to-date packages if not pkgs: # There couldn't have been any up-to-date packages if this state # only targeted a single package and is being allowed to proceed to # the install step. 
up_to_date = [] else: up_to_date = [x for x in pkgs if x not in targets] if __opts__['test']: comments = [] comments.append( 'The following packages would be installed/upgraded: ' + ', '.join(sorted(targets)) ) if up_to_date: up_to_date_count = len(up_to_date) if up_to_date_count <= 10: comments.append( 'The following packages are already up-to-date: ' + ', '.join( ['{0} ({1})'.format(x, cur[x]) for x in sorted(up_to_date)] ) ) else: comments.append( '{0} packages are already up-to-date' .format(up_to_date_count) ) return {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comments)} if salt.utils.platform.is_windows(): # pkg.install execution module on windows ensures the software # package is installed when no version is specified, it does not # upgrade the software to the latest. This is per the design. # Build updated list of pkgs *with verion number*, exclude # non-targeted ones targeted_pkgs = [{x: targets[x]} for x in targets] else: # Build updated list of pkgs to exclude non-targeted ones targeted_pkgs = list(targets) # No need to refresh, if a refresh was necessary it would have been # performed above when pkg.latest_version was run. 
try: changes = __salt__['pkg.install'](name=None, refresh=False, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=targeted_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while installing ' 'package(s): {0}'.format(exc)} if changes: # Find failed and successful updates failed = [x for x in targets if not changes.get(x) or changes[x].get('new') != targets[x] and targets[x] != 'latest'] successful = [x for x in targets if x not in failed] comments = [] if failed: msg = 'The following packages failed to update: ' \ '{0}'.format(', '.join(sorted(failed))) comments.append(msg) if successful: msg = 'The following packages were successfully ' \ 'installed/upgraded: ' \ '{0}'.format(', '.join(sorted(successful))) comments.append(msg) if up_to_date: if len(up_to_date) <= 10: msg = 'The following packages were already up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: msg = '{0} packages were already up-to-date '.format( len(up_to_date)) comments.append(msg) return {'name': name, 'changes': changes, 'result': False if failed else True, 'comment': ' '.join(comments)} else: if len(targets) > 10: comment = ('{0} targeted packages failed to update. ' 'See debug log for details.'.format(len(targets))) elif len(targets) > 1: comment = ('The following targeted packages failed to update. ' 'See debug log for details: ({0}).' 
.format(', '.join(sorted(targets)))) else: comment = 'Package {0} failed to ' \ 'update.'.format(next(iter(list(targets.keys())))) if up_to_date: if len(up_to_date) <= 10: comment += ' The following packages were already ' \ 'up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: comment += '{0} packages were already ' \ 'up-to-date'.format(len(up_to_date)) return {'name': name, 'changes': changes, 'result': False, 'comment': comment} else: if len(desired_pkgs) > 10: comment = 'All {0} packages are up-to-date.'.format( len(desired_pkgs)) elif len(desired_pkgs) > 1: comment = 'All packages are up-to-date ' \ '({0}).'.format(', '.join(sorted(desired_pkgs))) else: comment = 'Package {0} is already ' \ 'up-to-date'.format(desired_pkgs[0]) return {'name': name, 'changes': {}, 'result': True, 'comment': comment} def _uninstall( action='remove', name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Common function for package removal ''' if action not in ('remove', 'purge'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalid action \'{0}\'. 
' 'This is probably a bug.'.format(action)} try: pkg_params = __salt__['pkg_resource.parse_targets']( name, pkgs, normalize=normalize)[0] except MinionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while parsing targets: ' '{0}'.format(exc)} targets = _find_remove_targets(name, version, pkgs, normalize, ignore_epoch=ignore_epoch, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, list): return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking targets: ' '{0}'.format(targets)} if action == 'purge': old_removed = __salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) targets.extend([x for x in pkg_params if x in old_removed]) targets.sort() if not targets: return {'name': name, 'changes': {}, 'result': True, 'comment': 'None of the targeted packages are installed' '{0}'.format(' or partially installed' if action == 'purge' else '')} if __opts__['test']: return {'name': name, 'changes': {}, 'result': None, 'comment': 'The following packages will be {0}d: ' '{1}.'.format(action, ', '.join(targets))} changes = __salt__['pkg.{0}'.format(action)](name, pkgs=pkgs, version=version, **kwargs) new = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) failed = [] for x in pkg_params: if __grains__['os_family'] in ['Suse', 'RedHat']: # Check if the package version set to be removed is actually removed: if x in new and not pkg_params[x]: failed.append(x) elif x in new and pkg_params[x] in new[x]: failed.append(x + "-" + pkg_params[x]) elif x in new: failed.append(x) if action == 'purge': new_removed = __salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) failed.extend([x for x in pkg_params if x in new_removed]) failed.sort() if failed: return {'name': name, 'changes': changes, 'result': False, 'comment': 'The following packages failed to {0}: ' '{1}.'.format(action, ', 
'.join(failed))} comments = [] not_installed = sorted([x for x in pkg_params if x not in targets]) if not_installed: comments.append('The following packages were not installed: ' '{0}'.format(', '.join(not_installed))) comments.append('The following packages were {0}d: ' '{1}.'.format(action, ', '.join(targets))) else: comments.append('All targeted packages were {0}d.'.format(action)) return {'name': name, 'changes': changes, 'result': True, 'comment': ' '.join(comments)} def removed(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.remove`` if necessary to remove the package. name The name of the package to be removed. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.removed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to remove. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='remove', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while removing ' 'package(s): {0}'.format(exc)) return ret def uptodate(name, refresh=False, pkgs=None, **kwargs): ''' .. versionadded:: 2014.7.0 .. versionchanged:: 2018.3.0 Added support for the ``pkgin`` provider. Verify that the system is completely up to date. 
name The name has no functional value and is only used as a tracking reference refresh refresh the package database before checking for new upgrades pkgs list of packages to upgrade :param str cache_valid_time: This parameter sets the value in seconds after which cache marked as invalid, and cache update is necessary. This overwrite ``refresh`` parameter default behavior. In this case cache_valid_time is set, refresh will not take place for amount in seconds since last ``apt-get update`` executed on the system. .. note:: This parameter available only on Debian based distributions, and have no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 kwargs Any keyword arguments to pass through to ``pkg.upgrade``. .. versionadded:: 2015.5.0 ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'Failed to update'} if 'pkg.list_upgrades' not in __salt__: ret['comment'] = 'State pkg.uptodate is not available' return ret # emerge --update doesn't appear to support repo notation if 'fromrepo' in kwargs and __grains__['os_family'] == 'Gentoo': ret['comment'] = '\'fromrepo\' argument not supported on this platform' return ret if isinstance(refresh, bool): pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) try: packages = __salt__['pkg.list_upgrades'](refresh=refresh, **kwargs) expected = {pkgname: {'new': pkgver, 'old': __salt__['pkg.version'](pkgname, **kwargs)} for pkgname, pkgver in six.iteritems(packages)} if isinstance(pkgs, list): packages = [pkg for pkg in packages if pkg in pkgs] expected = {pkgname: pkgver for pkgname, pkgver in six.iteritems(expected) if pkgname in pkgs} except Exception as exc: ret['comment'] = six.text_type(exc) return ret else: ret['comment'] = 'refresh must be either True or False' return ret if not packages: ret['comment'] = 'System is already up-to-date' ret['result'] = True return ret elif 
__opts__['test']: ret['comment'] = 'System update will be performed' ret['changes'] = expected ret['result'] = None return ret try: ret['changes'] = __salt__['pkg.upgrade'](refresh=refresh, pkgs=pkgs, **kwargs) except CommandExecutionError as exc: if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while updating ' 'packages: {0}'.format(exc)) return ret # If a package list was provided, ensure those packages were updated missing = [] if isinstance(pkgs, list): missing = [pkg for pkg in six.iterkeys(expected) if pkg not in ret['changes']] if missing: ret['comment'] = 'The following package(s) failed to update: {0}'.format(', '.join(missing)) ret['result'] = False else: ret['comment'] = 'Upgrade ran successfully' ret['result'] = True return ret def group_installed(name, skip=None, include=None, **kwargs): ''' .. versionadded:: 2015.8.0 .. versionchanged:: 2016.11.0 Added support in :mod:`pacman <salt.modules.pacman>` Ensure that an entire package group is installed. This state is currently only supported for the :mod:`yum <salt.modules.yumpkg>` and :mod:`pacman <salt.modules.pacman>` package managers. skip Packages that would normally be installed by the package group ("default" packages), which should not be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - skip: - piranha include Packages which are included in a group, which would not normally be installed by a ``yum groupinstall`` ("optional" packages). Note that this will not enforce group membership; if you include packages which are not members of the specified groups, they will still be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - include: - haproxy .. 
versionchanged:: 2016.3.0 This option can no longer be passed as a comma-separated list, it must now be passed as a list (as shown in the above example). .. note:: Because this is essentially a wrapper around :py:func:`pkg.install <salt.modules.yumpkg.install>`, any argument which can be passed to pkg.install may also be included here, and it will be passed on to the call to :py:func:`pkg.install <salt.modules.yumpkg.install>`. ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} if 'pkg.group_diff' not in __salt__: ret['comment'] = 'pkg.group_install not available for this platform' return ret if skip is None: skip = [] else: if not isinstance(skip, list): ret['comment'] = 'skip must be formatted as a list' return ret for idx, item in enumerate(skip): if not isinstance(item, six.string_types): skip[idx] = six.text_type(item) if include is None: include = [] else: if not isinstance(include, list): ret['comment'] = 'include must be formatted as a list' return ret for idx, item in enumerate(include): if not isinstance(item, six.string_types): include[idx] = six.text_type(item) try: diff = __salt__['pkg.group_diff'](name) except CommandExecutionError as err: ret['comment'] = ('An error was encountered while installing/updating ' 'group \'{0}\': {1}.'.format(name, err)) return ret mandatory = diff['mandatory']['installed'] + \ diff['mandatory']['not installed'] invalid_skip = [x for x in mandatory if x in skip] if invalid_skip: ret['comment'] = ( 'The following mandatory packages cannot be skipped: {0}' .format(', '.join(invalid_skip)) ) return ret targets = diff['mandatory']['not installed'] targets.extend([x for x in diff['default']['not installed'] if x not in skip]) targets.extend(include) if not targets: ret['result'] = True ret['comment'] = 'Group \'{0}\' is already installed'.format(name) return ret partially_installed = diff['mandatory']['installed'] \ or diff['default']['installed'] \ or diff['optional']['installed'] if __opts__['test']: 
ret['result'] = None if partially_installed: ret['comment'] = ( 'Group \'{0}\' is partially installed and will be updated' .format(name) ) else: ret['comment'] = 'Group \'{0}\' will be installed'.format(name) return ret try: ret['changes'] = __salt__['pkg.install'](pkgs=targets, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while ' 'installing/updating group \'{0}\': {1}' .format(name, exc)) return ret failed = [x for x in targets if x not in __salt__['pkg.list_pkgs'](**kwargs)] if failed: ret['comment'] = ( 'Failed to install the following packages: {0}' .format(', '.join(failed)) ) return ret ret['result'] = True ret['comment'] = 'Group \'{0}\' was {1}'.format( name, 'updated' if partially_installed else 'installed' ) return ret def mod_init(low): ''' Set a flag to tell the install functions to refresh the package database. This ensures that the package database is refreshed only once during a state run significantly improving the speed of package management during a state run. It sets a flag for a number of reasons, primarily due to timeline logic. When originally setting up the mod_init for pkg a number of corner cases arose with different package managers and how they refresh package data. It also runs the "ex_mod_init" from the package manager module that is currently loaded. The "ex_mod_init" is expected to work as a normal "mod_init" function. .. 
seealso:: :py:func:`salt.modules.ebuild.ex_mod_init` ''' ret = True if 'pkg.ex_mod_init' in __salt__: ret = __salt__['pkg.ex_mod_init'](low) if low['fun'] == 'installed' or low['fun'] == 'latest': salt.utils.pkg.write_rtag(__opts__) return ret return False def mod_aggregate(low, chunks, running): ''' The mod_aggregate function which looks up all packages in the available low chunks and merges them into a single pkgs ref in the present low data ''' pkgs = [] pkg_type = None agg_enabled = [ 'installed', 'latest', 'removed', 'purged', ] if low.get('fun') not in agg_enabled: return low for chunk in chunks: tag = __utils__['state.gen_tag'](chunk) if tag in running: # Already ran the pkg state, skip aggregation continue if chunk.get('state') == 'pkg': if '__agg__' in chunk: continue # Check for the same function if chunk.get('fun') != low.get('fun'): continue # Check for the same repo if chunk.get('fromrepo') != low.get('fromrepo'): continue # Check first if 'sources' was passed so we don't aggregate pkgs # and sources together. if 'sources' in chunk: if pkg_type is None: pkg_type = 'sources' if pkg_type == 'sources': pkgs.extend(chunk['sources']) chunk['__agg__'] = True else: if pkg_type is None: pkg_type = 'pkgs' if pkg_type == 'pkgs': # Pull out the pkg names! if 'pkgs' in chunk: pkgs.extend(chunk['pkgs']) chunk['__agg__'] = True elif 'name' in chunk: version = chunk.pop('version', None) if version is not None: pkgs.append({chunk['name']: version}) else: pkgs.append(chunk['name']) chunk['__agg__'] = True if pkg_type is not None and pkgs: if pkg_type in low: low[pkg_type].extend(pkgs) else: low[pkg_type] = pkgs return low def mod_watch(name, **kwargs): ''' Install/reinstall a package based on a watch requisite .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered. 
''' sfun = kwargs.pop('sfun', None) mapfun = {'purged': purged, 'latest': latest, 'removed': removed, 'installed': installed} if sfun in mapfun: return mapfun[sfun](name, **kwargs) return {'name': name, 'changes': {}, 'comment': 'pkg.{0} does not work with the watch requisite'.format(sfun), 'result': False}
saltstack/salt
salt/states/pkg.py
uptodate
python
def uptodate(name, refresh=False, pkgs=None, **kwargs): ''' .. versionadded:: 2014.7.0 .. versionchanged:: 2018.3.0 Added support for the ``pkgin`` provider. Verify that the system is completely up to date. name The name has no functional value and is only used as a tracking reference refresh refresh the package database before checking for new upgrades pkgs list of packages to upgrade :param str cache_valid_time: This parameter sets the value in seconds after which cache marked as invalid, and cache update is necessary. This overwrite ``refresh`` parameter default behavior. In this case cache_valid_time is set, refresh will not take place for amount in seconds since last ``apt-get update`` executed on the system. .. note:: This parameter available only on Debian based distributions, and have no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 kwargs Any keyword arguments to pass through to ``pkg.upgrade``. .. 
versionadded:: 2015.5.0 ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'Failed to update'} if 'pkg.list_upgrades' not in __salt__: ret['comment'] = 'State pkg.uptodate is not available' return ret # emerge --update doesn't appear to support repo notation if 'fromrepo' in kwargs and __grains__['os_family'] == 'Gentoo': ret['comment'] = '\'fromrepo\' argument not supported on this platform' return ret if isinstance(refresh, bool): pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) try: packages = __salt__['pkg.list_upgrades'](refresh=refresh, **kwargs) expected = {pkgname: {'new': pkgver, 'old': __salt__['pkg.version'](pkgname, **kwargs)} for pkgname, pkgver in six.iteritems(packages)} if isinstance(pkgs, list): packages = [pkg for pkg in packages if pkg in pkgs] expected = {pkgname: pkgver for pkgname, pkgver in six.iteritems(expected) if pkgname in pkgs} except Exception as exc: ret['comment'] = six.text_type(exc) return ret else: ret['comment'] = 'refresh must be either True or False' return ret if not packages: ret['comment'] = 'System is already up-to-date' ret['result'] = True return ret elif __opts__['test']: ret['comment'] = 'System update will be performed' ret['changes'] = expected ret['result'] = None return ret try: ret['changes'] = __salt__['pkg.upgrade'](refresh=refresh, pkgs=pkgs, **kwargs) except CommandExecutionError as exc: if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while updating ' 'packages: {0}'.format(exc)) return ret # If a package list was provided, ensure those packages were updated missing = [] if isinstance(pkgs, list): missing = [pkg for pkg in six.iterkeys(expected) if pkg not in ret['changes']] if missing: ret['comment'] = 'The following package(s) failed to update: {0}'.format(', '.join(missing)) ret['result'] = False else: ret['comment'] = 'Upgrade ran successfully' ret['result'] = True return ret
.. versionadded:: 2014.7.0 .. versionchanged:: 2018.3.0 Added support for the ``pkgin`` provider. Verify that the system is completely up to date. name The name has no functional value and is only used as a tracking reference refresh refresh the package database before checking for new upgrades pkgs list of packages to upgrade :param str cache_valid_time: This parameter sets the value in seconds after which cache marked as invalid, and cache update is necessary. This overwrite ``refresh`` parameter default behavior. In this case cache_valid_time is set, refresh will not take place for amount in seconds since last ``apt-get update`` executed on the system. .. note:: This parameter available only on Debian based distributions, and have no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 kwargs Any keyword arguments to pass through to ``pkg.upgrade``. .. versionadded:: 2015.5.0
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pkg.py#L2982-L3089
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n", "def iterkeys(d, **kw):\n return d.iterkeys(**kw)\n", "def _resolve_capabilities(pkgs, refresh=False, **kwargs):\n '''\n Resolve capabilities in ``pkgs`` and exchange them with real package\n names, when the result is distinct.\n This feature can be turned on while setting the paramter\n ``resolve_capabilities`` to True.\n\n Return the input dictionary with replaced capability names and as\n second return value a bool which say if a refresh need to be run.\n\n In case of ``resolve_capabilities`` is False (disabled) or not\n supported by the implementation the input is returned unchanged.\n '''\n if not pkgs or 'pkg.resolve_capabilities' not in __salt__:\n return pkgs, refresh\n\n ret = __salt__['pkg.resolve_capabilities'](pkgs, refresh=refresh, **kwargs)\n return ret, False\n" ]
# -*- coding: utf-8 -*- ''' Installation of packages using OS package managers such as yum or apt-get ========================================================================= .. note:: On minions running systemd>=205, as of version 2015.8.12, 2016.3.3, and 2016.11.0, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing the package manager commands spawned by Salt, when Salt updates itself (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.use_scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Salt can manage software packages via the pkg state module, packages can be set up to be installed, latest, removed and purged. Package management declarations are typically rather simple: .. code-block:: yaml vim: pkg.installed A more involved example involves pulling from a custom repository. .. code-block:: yaml base: pkgrepo.managed: - humanname: Logstash PPA - name: ppa:wolfnet/logstash - dist: precise - file: /etc/apt/sources.list.d/logstash.list - keyid: 28B04E4A - keyserver: keyserver.ubuntu.com logstash: pkg.installed: - fromrepo: ppa:wolfnet/logstash Multiple packages can also be installed with the use of the pkgs state module .. code-block:: yaml dotdeb.repo: pkgrepo.managed: - humanname: Dotdeb - name: deb http://packages.dotdeb.org wheezy-php55 all - dist: wheezy-php55 - file: /etc/apt/sources.list.d/dotbeb.list - keyid: 89DF5277 - keyserver: keys.gnupg.net - refresh_db: true php.packages: pkg.installed: - fromrepo: wheezy-php55 - pkgs: - php5-fpm - php5-cli - php5-curl .. 
warning:: Package names are currently case-sensitive. If the minion is using a package manager which is not case-sensitive (such as :mod:`pkgng <salt.modules.pkgng>`), then this state will fail if the proper case is not used. This will be addressed in a future release of Salt. ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import fnmatch import logging import os import re # Import Salt libs import salt.utils.pkg import salt.utils.platform import salt.utils.versions from salt.output import nested from salt.utils.functools import namespaced_function as _namespaced_function from salt.utils.odict import OrderedDict as _OrderedDict from salt.exceptions import ( CommandExecutionError, MinionError, SaltInvocationError ) from salt.modules.pkg_resource import _repack_pkgs # Import 3rd-party libs from salt.ext import six # pylint: disable=invalid-name _repack_pkgs = _namespaced_function(_repack_pkgs, globals()) if salt.utils.platform.is_windows(): # pylint: disable=import-error,no-name-in-module,unused-import from salt.ext.six.moves.urllib.parse import urlparse as _urlparse from salt.exceptions import SaltRenderError import collections import datetime import errno import time from functools import cmp_to_key # pylint: disable=import-error # pylint: enable=unused-import from salt.modules.win_pkg import _get_package_info from salt.modules.win_pkg import get_repo_data from salt.modules.win_pkg import _get_repo_details from salt.modules.win_pkg import _refresh_db_conditional from salt.modules.win_pkg import refresh_db from salt.modules.win_pkg import genrepo from salt.modules.win_pkg import _repo_process_pkg_sls from salt.modules.win_pkg import _get_latest_pkg_version from salt.modules.win_pkg import _reverse_cmp_pkg_versions _get_package_info = _namespaced_function(_get_package_info, globals()) get_repo_data = _namespaced_function(get_repo_data, globals()) _get_repo_details = \ _namespaced_function(_get_repo_details, globals()) 
_refresh_db_conditional = \ _namespaced_function(_refresh_db_conditional, globals()) refresh_db = _namespaced_function(refresh_db, globals()) genrepo = _namespaced_function(genrepo, globals()) _repo_process_pkg_sls = \ _namespaced_function(_repo_process_pkg_sls, globals()) _get_latest_pkg_version = \ _namespaced_function(_get_latest_pkg_version, globals()) _reverse_cmp_pkg_versions = \ _namespaced_function(_reverse_cmp_pkg_versions, globals()) # The following imports are used by the namespaced win_pkg funcs # and need to be included in their globals. # pylint: disable=import-error,unused-import from salt.utils.versions import LooseVersion # pylint: enable=import-error,unused-import # pylint: enable=invalid-name log = logging.getLogger(__name__) def __virtual__(): ''' Only make these states available if a pkg provider has been detected or assigned for this minion ''' return 'pkg.install' in __salt__ def _get_comparison_spec(pkgver): ''' Return a tuple containing the comparison operator and the version. If no comparison operator was passed, the comparison is assumed to be an "equals" comparison, and "==" will be the operator returned. ''' oper, verstr = salt.utils.pkg.split_comparison(pkgver.strip()) if oper in ('=', ''): oper = '==' return oper, verstr def _parse_version_string(version_conditions_string): ''' Returns a list of two-tuples containing (operator, version). ''' result = [] version_conditions_string = version_conditions_string.strip() if not version_conditions_string: return result for version_condition in version_conditions_string.split(','): operator_and_version = _get_comparison_spec(version_condition) result.append(operator_and_version) return result def _fulfills_version_string(installed_versions, version_conditions_string, ignore_epoch=False, allow_updates=False): ''' Returns True if any of the installed versions match the specified version conditions, otherwise returns False. 
installed_versions The installed versions version_conditions_string The string containing all version conditions. E.G. 1.2.3-4 >=1.2.3-4 >=1.2.3-4, <2.3.4-5 >=1.2.3-4, <2.3.4-5, !=1.2.4-1 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. allow_updates : False Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. (Only applicable if only one strict version condition is specified E.G. version: 2.0.6~ubuntu3) ''' version_conditions = _parse_version_string(version_conditions_string) for installed_version in installed_versions: fullfills_all = True for operator, version_string in version_conditions: if allow_updates and len(version_conditions) == 1 and operator == '==': operator = '>=' fullfills_all = fullfills_all and _fulfills_version_spec([installed_version], operator, version_string, ignore_epoch=ignore_epoch) if fullfills_all: return True return False def _fulfills_version_spec(versions, oper, desired_version, ignore_epoch=False): ''' Returns True if any of the installed versions match the specified version, otherwise returns False ''' cmp_func = __salt__.get('pkg.version_cmp') # stripping "with_origin" dict wrapper if salt.utils.platform.is_freebsd(): if isinstance(versions, dict) and 'version' in versions: versions = versions['version'] for ver in versions: if (oper == '==' and fnmatch.fnmatch(ver, desired_version)) \ or salt.utils.versions.compare(ver1=ver, oper=oper, ver2=desired_version, cmp_func=cmp_func, ignore_epoch=ignore_epoch): return True return False def _find_unpurge_targets(desired, **kwargs): ''' Find packages which are marked to be purged but can't yet be removed because they are dependencies for 
other installed packages. These are the packages which will need to be 'unpurged' because they are part of pkg.installed states. This really just applies to Debian-based Linuxes. ''' return [ x for x in desired if x in __salt__['pkg.list_pkgs'](purge_desired=True, **kwargs) ] def _find_download_targets(name=None, version=None, pkgs=None, normalize=True, skip_suggestions=False, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.downloaded and discover what packages need to be downloaded. Return a dict of packages to download. ''' cur_pkgs = __salt__['pkg.list_downloaded'](**kwargs) if pkgs: to_download = _repack_pkgs(pkgs, normalize=normalize) if not to_download: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. See ' 'minion log.'} else: if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_download = {_normalize_name(name): version} else: to_download = {name: version} cver = cur_pkgs.get(name, {}) if name in to_download: # Package already downloaded, no need to download again if cver and version in cver: return {'name': name, 'changes': {}, 'result': True, 'comment': 'Version {0} of package \'{1}\' is already ' 'downloaded'.format(version, name)} # if cver is not an empty string, the package is already downloaded elif cver and version is None: # The package is downloaded return {'name': name, 'changes': {}, 'result': True, 'comment': 'Package {0} is already ' 'downloaded'.format(name)} version_spec = False if not skip_suggestions: try: problems = _preflight_check(to_download, **kwargs) except CommandExecutionError: pass else: comments = [] if problems.get('no_suggest'): comments.append( 'The following package(s) were not found, and no ' 'possible matches were found in the package db: ' '{0}'.format( ', '.join(sorted(problems['no_suggest'])) ) ) if problems.get('suggest'): for pkgname, suggestions in \ six.iteritems(problems['suggest']): 
comments.append( 'Package \'{0}\' not found (possible matches: ' '{1})'.format(pkgname, ', '.join(suggestions)) ) if comments: if len(comments) > 1: comments.append('') return {'name': name, 'changes': {}, 'result': False, 'comment': '. '.join(comments).rstrip()} # Find out which packages will be targeted in the call to pkg.download # Check current downloaded versions against specified versions targets = {} problems = [] for pkgname, pkgver in six.iteritems(to_download): cver = cur_pkgs.get(pkgname, {}) # Package not yet downloaded, so add to targets if not cver: targets[pkgname] = pkgver continue # No version specified but package is already downloaded elif cver and not pkgver: continue version_spec = True try: if not _fulfills_version_string(cver.keys(), pkgver, ignore_epoch=ignore_epoch): targets[pkgname] = pkgver except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already downloaded msg = ( 'All specified packages{0} are already downloaded' .format(' (matching specified versions)' if version_spec else '') ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_advisory_targets(name=None, advisory_ids=None, **kwargs): ''' Inspect the arguments to pkg.patch_installed and discover what advisory patches need to be installed. Return a dict of advisory patches to install. 
''' cur_patches = __salt__['pkg.list_installed_patches'](**kwargs) if advisory_ids: to_download = advisory_ids else: to_download = [name] if cur_patches.get(name, {}): # Advisory patch already installed, no need to install it again return {'name': name, 'changes': {}, 'result': True, 'comment': 'Advisory patch {0} is already ' 'installed'.format(name)} # Find out which advisory patches will be targeted in the call to pkg.install targets = [] for patch_name in to_download: cver = cur_patches.get(patch_name, {}) # Advisory patch not yet installed, so add to targets if not cver: targets.append(patch_name) continue if not targets: # All specified packages are already downloaded msg = ('All specified advisory patches are already installed') return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_remove_targets(name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.removed and discover what packages need to be removed. Return a dict of packages to remove. ''' if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if pkgs: to_remove = _repack_pkgs(pkgs, normalize=normalize) if not to_remove: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. 
See ' 'minion log.'} else: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_remove = {_normalize_name(name): version} version_spec = False # Find out which packages will be targeted in the call to pkg.remove # Check current versions against specified versions targets = [] problems = [] for pkgname, pkgver in six.iteritems(to_remove): # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names origin = bool(re.search('/', pkgname)) if __grains__['os'] == 'FreeBSD' and origin: cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == pkgname] else: cver = cur_pkgs.get(pkgname, []) # Package not installed, no need to remove if not cver: continue # No version specified and pkg is installed elif __salt__['pkg_resource.version_clean'](pkgver) is None: targets.append(pkgname) continue version_spec = True try: if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch): targets.append(pkgname) else: log.debug( 'Current version (%s) did not match desired version ' 'specification (%s), will not remove', cver, pkgver ) except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already absent msg = 'All specified packages{0} are already absent'.format( ' (matching specified versions)' if version_spec else '' ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_install_targets(name=None, version=None, pkgs=None, sources=None, skip_suggestions=False, pkg_verify=False, normalize=True, ignore_epoch=False, reinstall=False, refresh=False, **kwargs): ''' Inspect the arguments to pkg.installed and discover what packages need to be installed. 
Return a dict of desired packages ''' was_refreshed = False if all((pkgs, sources)): return {'name': name, 'changes': {}, 'result': False, 'comment': 'Only one of "pkgs" and "sources" is permitted.'} # dict for packages that fail pkg.verify and their altered files altered_files = {} # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True if salt.utils.platform.is_windows(): # Windows requires a refresh to establish a pkg db if refresh=True, so # add it to the kwargs. 
kwargs['refresh'] = refresh resolve_capabilities = kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__ try: cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) cur_prov = resolve_capabilities and __salt__['pkg.list_provides'](**kwargs) or dict() except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': exc.strerror} if salt.utils.platform.is_windows() and kwargs.pop('refresh', False): # We already refreshed when we called pkg.list_pkgs was_refreshed = True refresh = False if any((pkgs, sources)): if pkgs: desired = _repack_pkgs(pkgs, normalize=normalize) elif sources: desired = __salt__['pkg_resource.pack_sources']( sources, normalize=normalize, ) if not desired: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted \'{0}\' parameter. See ' 'minion log.'.format('pkgs' if pkgs else 'sources')} to_unpurge = _find_unpurge_targets(desired, **kwargs) else: if salt.utils.platform.is_windows(): pkginfo = _get_package_info(name, saltenv=kwargs['saltenv']) if not pkginfo: return {'name': name, 'changes': {}, 'result': False, 'comment': 'Package {0} not found in the ' 'repository.'.format(name)} if version is None: version = _get_latest_pkg_version(pkginfo) if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) desired = {_normalize_name(name): version} else: desired = {name: version} to_unpurge = _find_unpurge_targets(desired, **kwargs) # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names origin = bool(re.search('/', name)) if __grains__['os'] == 'FreeBSD' and origin: cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == name] else: cver = cur_pkgs.get(name, []) if name not in to_unpurge: if version and version in cver \ and not reinstall \ and not pkg_verify: # The package is installed and is the correct version return {'name': name, 'changes': {}, 'result': True, 
'comment': 'Version {0} of package \'{1}\' is already ' 'installed'.format(version, name)} # if cver is not an empty string, the package is already installed elif cver and version is None \ and not reinstall \ and not pkg_verify: # The package is installed return {'name': name, 'changes': {}, 'result': True, 'comment': 'Package {0} is already ' 'installed'.format(name)} version_spec = False if not sources: # Check for alternate package names if strict processing is not # enforced. Takes extra time. Disable for improved performance if not skip_suggestions: # Perform platform-specific pre-flight checks not_installed = dict([ (name, version) for name, version in desired.items() if not (name in cur_pkgs and (version is None or _fulfills_version_string(cur_pkgs[name], version))) ]) if not_installed: try: problems = _preflight_check(not_installed, **kwargs) except CommandExecutionError: pass else: comments = [] if problems.get('no_suggest'): comments.append( 'The following package(s) were not found, and no ' 'possible matches were found in the package db: ' '{0}'.format( ', '.join(sorted(problems['no_suggest'])) ) ) if problems.get('suggest'): for pkgname, suggestions in \ six.iteritems(problems['suggest']): comments.append( 'Package \'{0}\' not found (possible matches: ' '{1})'.format(pkgname, ', '.join(suggestions)) ) if comments: if len(comments) > 1: comments.append('') return {'name': name, 'changes': {}, 'result': False, 'comment': '. '.join(comments).rstrip()} # Resolve the latest package version for any packages with "latest" in the # package version wants_latest = [] \ if sources \ else [x for x, y in six.iteritems(desired) if y == 'latest'] if wants_latest: resolved_latest = __salt__['pkg.latest_version'](*wants_latest, refresh=refresh, **kwargs) if len(wants_latest) == 1: resolved_latest = {wants_latest[0]: resolved_latest} if refresh: was_refreshed = True refresh = False # pkg.latest_version returns an empty string when the package is # up-to-date. 
So check the currently-installed packages. If found, the # resolved latest version will be the currently installed one from # cur_pkgs. If not found, then the package doesn't exist and the # resolved latest version will be None. for key in resolved_latest: if not resolved_latest[key]: if key in cur_pkgs: resolved_latest[key] = cur_pkgs[key][-1] else: resolved_latest[key] = None # Update the desired versions with the ones we resolved desired.update(resolved_latest) # Find out which packages will be targeted in the call to pkg.install targets = {} to_reinstall = {} problems = [] warnings = [] failed_verify = False for package_name, version_string in six.iteritems(desired): cver = cur_pkgs.get(package_name, []) if resolve_capabilities and not cver and package_name in cur_prov: cver = cur_pkgs.get(cur_prov.get(package_name)[0], []) # Package not yet installed, so add to targets if not cver: targets[package_name] = version_string continue if sources: if reinstall: to_reinstall[package_name] = version_string continue elif 'lowpkg.bin_pkg_info' not in __salt__: continue # Metadata parser is available, cache the file and derive the # package's name and version err = 'Unable to cache {0}: {1}' try: cached_path = __salt__['cp.cache_file'](version_string, saltenv=kwargs['saltenv']) except CommandExecutionError as exc: problems.append(err.format(version_string, exc)) continue if not cached_path: problems.append(err.format(version_string, 'file not found')) continue elif not os.path.exists(cached_path): problems.append('{0} does not exist on minion'.format(version_string)) continue source_info = __salt__['lowpkg.bin_pkg_info'](cached_path) if source_info is None: warnings.append('Failed to parse metadata for {0}'.format(version_string)) continue else: verstr = source_info['version'] else: verstr = version_string if reinstall: to_reinstall[package_name] = version_string continue if not __salt__['pkg_resource.check_extra_requirements'](package_name, version_string): 
targets[package_name] = version_string continue # No version specified and pkg is installed elif __salt__['pkg_resource.version_clean'](version_string) is None: if (not reinstall) and pkg_verify: try: verify_result = __salt__['pkg.verify']( package_name, ignore_types=ignore_types, verify_options=verify_options, **kwargs ) except (CommandExecutionError, SaltInvocationError) as exc: failed_verify = exc.strerror continue if verify_result: to_reinstall[package_name] = version_string altered_files[package_name] = verify_result continue version_fulfilled = False allow_updates = bool(not sources and kwargs.get('allow_updates')) try: version_fulfilled = _fulfills_version_string(cver, verstr, ignore_epoch=ignore_epoch, allow_updates=allow_updates) except CommandExecutionError as exc: problems.append(exc.strerror) continue # Compare desired version against installed version. version_spec = True if not version_fulfilled: if reinstall: to_reinstall[package_name] = version_string else: version_conditions = _parse_version_string(version_string) if pkg_verify and any(oper == '==' for oper, version in version_conditions): try: verify_result = __salt__['pkg.verify']( package_name, ignore_types=ignore_types, verify_options=verify_options, **kwargs ) except (CommandExecutionError, SaltInvocationError) as exc: failed_verify = exc.strerror continue if verify_result: to_reinstall[package_name] = version_string altered_files[package_name] = verify_result else: log.debug( 'Current version (%s) did not match desired version ' 'specification (%s), adding to installation targets', cver, version_string ) targets[package_name] = version_string if failed_verify: problems.append(failed_verify) if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not any((targets, to_unpurge, to_reinstall)): # All specified packages are installed msg = 'All specified packages are already installed{0}' msg = msg.format( ' and are at the desired version' if 
version_spec and not sources else '' ) ret = {'name': name, 'changes': {}, 'result': True, 'comment': msg} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret return (desired, targets, to_unpurge, to_reinstall, altered_files, warnings, was_refreshed) def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None): ''' Determine whether or not the installed packages match what was requested in the SLS file. ''' ok = [] failed = [] if not new_caps: new_caps = dict() for pkgname, pkgver in desired.items(): # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names. # Homebrew for Mac OSX does something similar with tap names # prefixing package names, separated with a slash. has_origin = '/' in pkgname if __grains__['os'] == 'FreeBSD' and has_origin: cver = [k for k, v in six.iteritems(new_pkgs) if v['origin'] == pkgname] elif __grains__['os'] == 'MacOS' and has_origin: cver = new_pkgs.get(pkgname, new_pkgs.get(pkgname.split('/')[-1])) elif __grains__['os'] == 'OpenBSD': cver = new_pkgs.get(pkgname.split('%')[0]) elif __grains__['os_family'] == 'Debian': cver = new_pkgs.get(pkgname.split('=')[0]) else: cver = new_pkgs.get(pkgname) if not cver and pkgname in new_caps: cver = new_pkgs.get(new_caps.get(pkgname)[0]) if not cver: failed.append(pkgname) continue elif pkgver == 'latest': ok.append(pkgname) continue elif not __salt__['pkg_resource.version_clean'](pkgver): ok.append(pkgname) continue elif pkgver.endswith("*") and cver[0].startswith(pkgver[:-1]): ok.append(pkgname) continue if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch): ok.append(pkgname) else: failed.append(pkgname) return ok, failed def _get_desired_pkg(name, desired): ''' Helper function that retrieves and nicely formats the desired pkg (and version if specified) so that helpful information can be printed in the comment for the state. 
''' if not desired[name] or desired[name].startswith(('<', '>', '=')): oper = '' else: oper = '=' return '{0}{1}{2}'.format(name, oper, '' if not desired[name] else desired[name]) def _preflight_check(desired, fromrepo, **kwargs): ''' Perform platform-specific checks on desired packages ''' if 'pkg.check_db' not in __salt__: return {} ret = {'suggest': {}, 'no_suggest': []} pkginfo = __salt__['pkg.check_db']( *list(desired.keys()), fromrepo=fromrepo, **kwargs ) for pkgname in pkginfo: if pkginfo[pkgname]['found'] is False: if pkginfo[pkgname]['suggestions']: ret['suggest'][pkgname] = pkginfo[pkgname]['suggestions'] else: ret['no_suggest'].append(pkgname) return ret def _nested_output(obj): ''' Serialize obj and format for output ''' nested.__opts__ = __opts__ ret = nested.output(obj).rstrip() return ret def _resolve_capabilities(pkgs, refresh=False, **kwargs): ''' Resolve capabilities in ``pkgs`` and exchange them with real package names, when the result is distinct. This feature can be turned on while setting the paramter ``resolve_capabilities`` to True. Return the input dictionary with replaced capability names and as second return value a bool which say if a refresh need to be run. In case of ``resolve_capabilities`` is False (disabled) or not supported by the implementation the input is returned unchanged. ''' if not pkgs or 'pkg.resolve_capabilities' not in __salt__: return pkgs, refresh ret = __salt__['pkg.resolve_capabilities'](pkgs, refresh=refresh, **kwargs) return ret, False def installed( name, version=None, refresh=None, fromrepo=None, skip_verify=False, skip_suggestions=False, pkgs=None, sources=None, allow_updates=False, pkg_verify=False, normalize=True, ignore_epoch=False, reinstall=False, update_holds=False, bypass_file=None, bypass_file_contains=None, **kwargs): ''' Ensure that the package is installed, and that it is the correct version (if specified). :param str name: The name of the package to be installed. 
This parameter is ignored if either "pkgs" or "sources" is used. Additionally, please note that this option can only be used to install packages from a software repository. To install a package file manually, use the "sources" option detailed below. :param str version: Install a specific version of a package. This option is ignored if "sources" is used. Currently, this option is supported for the following pkg providers: :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`pkgin <salt.modules.pkgin>`, :mod:`win_pkg <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`. The version number includes the release designation where applicable, to allow Salt to target a specific release of a given version. When in doubt, using the ``pkg.latest_version`` function for an uninstalled package will tell you the version available. .. code-block:: bash # salt myminion pkg.latest_version vim-enhanced myminion: 2:7.4.160-1.el7 .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the ``pkg.latest_version`` output above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. 
Also, while this function is not yet implemented for all pkg frontends, :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` will show all versions available in the various repositories for a given package, irrespective of whether or not it is installed. .. code-block:: bash # salt myminion pkg.list_repo_pkgs bash myminion: ---------- bash: - 4.2.46-21.el7_3 - 4.2.46-20.el7_2 This function was first added for :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` in 2014.1.0, and was expanded to :py:func:`Debian/Ubuntu <salt.modules.aptpkg.list_repo_pkgs>` and :py:func:`Arch Linux <salt.modules.pacman.list_repo_pkgs>`-based distros in the 2017.7.0 release. The version strings returned by either of these functions can be used as version specifiers in pkg states. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.installed: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 If the version given is the string ``latest``, the latest available package version will be installed à la ``pkg.latest``. **WILDCARD VERSIONS** As of the 2017.7.0 release, this state now supports wildcards in package versions for SUSE SLES/Leap/Tumbleweed, Debian/Ubuntu, RHEL/CentOS, Arch Linux, and their derivatives. Using wildcards can be useful for packages where the release name is built into the version in some way, such as for RHEL/CentOS which typically has version numbers like ``1.2.34-5.el7``. An example of the usage for this would be: .. code-block:: yaml mypkg: pkg.installed: - version: '1.2.34*' Keep in mind that using wildcard versions will result in a slower state run since Salt must gather the available versions of the specified packages and figure out which of them match the specified wildcard expression. :param bool refresh: This parameter controls whether or not the package repo database is updated prior to installing the requested package(s). 
If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before installing. If ``False``, the package database will *not* be refreshed before installing. If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - cache_valid_time: 300 - allow_updates: True - hold: False In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param str fromrepo: Specify a repository from which to install .. note:: Distros which use APT (Debian, Ubuntu, etc.) do not have a concept of repositories, in the same way as YUM-based distros do. When a source is added, it is assigned to a given release. Consider the following source configuration: .. code-block:: text deb http://ppa.launchpad.net/saltstack/salt/ubuntu precise main The packages provided by this source would be made available via the ``precise`` release, therefore ``fromrepo`` would need to be set to ``precise`` for Salt to install the package from this source. 
Having multiple sources in the same release may result in the default install candidate being newer than what is desired. If this is the case, the desired version must be specified using the ``version`` parameter. If the ``pkgs`` parameter is being used to install multiple packages in the same state, then instead of using ``version``, use the method of version specification described in the **Multiple Package Installation Options** section below. Running the shell command ``apt-cache policy pkgname`` on a minion can help elucidate the APT configuration and aid in properly configuring states: .. code-block:: bash root@saltmaster:~# salt ubuntu01 cmd.run 'apt-cache policy ffmpeg' ubuntu01: ffmpeg: Installed: (none) Candidate: 7:0.10.11-1~precise1 Version table: 7:0.10.11-1~precise1 0 500 http://ppa.launchpad.net/jon-severinsson/ffmpeg/ubuntu/ precise/main amd64 Packages 4:0.8.10-0ubuntu0.12.04.1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise-updates/main amd64 Packages 500 http://security.ubuntu.com/ubuntu/ precise-security/main amd64 Packages 4:0.8.1-0ubuntu1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise/main amd64 Packages The release is located directly after the source's URL. The actual release name is the part before the slash, so to install version **4:0.8.10-0ubuntu0.12.04.1** either ``precise-updates`` or ``precise-security`` could be used for the ``fromrepo`` value. :param bool skip_verify: Skip the GPG verification check for the package to be installed :param bool skip_suggestions: Force strict package naming. Disables lookup of package alternatives. .. versionadded:: 2014.1.1 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 :param bool allow_updates: Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). 
This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - allow_updates: True - hold: False :param bool pkg_verify: .. versionadded:: 2014.7.0 For requested packages that are already installed and would not be targeted for upgrade or downgrade, use pkg.verify to determine if any of the files installed by the package have been altered. If files have been altered, the reinstall option of pkg.install is used to force a reinstall. Types to ignore can be passed to pkg.verify. Additionally, ``verify_options`` can be used to modify further the behavior of pkg.verify. See examples below. Currently, this option is supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>`. Examples: .. code-block:: yaml httpd: pkg.installed: - version: 2.2.15-30.el6.centos - pkg_verify: True .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc - verify_options: - nodeps - nofiledigest :param list ignore_types: List of types to ignore when verifying the package .. versionadded:: 2014.7.0 :param list verify_options: List of additional options to pass when verifying the package. These options will be added to the ``rpm -V`` command, prepended with ``--`` (for example, when ``nodeps`` is passed in this option, ``rpm -V`` will be run with ``--nodeps``). .. versionadded:: 2016.11.0 :param bool normalize: Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml gpfs.gplbin-2.6.32-279.31.1.el6.x86_64: pkg.installed: - normalize: False :param bool ignore_epoch: When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.installed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the package would be installed, but the state would report as failed because the actual installed version would be ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 | **MULTIPLE PACKAGE INSTALLATION OPTIONS: (not supported in pkgng)** :param list pkgs: A list of packages to install from a software repository. All packages listed under ``pkgs`` will be installed via a single command. .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar - baz - hold: True ``NOTE:`` For :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`winrepo <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`, version numbers can be specified in the ``pkgs`` argument. For example: .. 
code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz Additionally, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`zypper <salt.modules.zypper>`, :mod:`yum/dnf <salt.modules.yumpkg>`, and :mod:`apt <salt.modules.aptpkg>` support the ``<``, ``<=``, ``>=``, and ``>`` operators for more control over what versions will be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: '>=1.2.3-4' - baz ``NOTE:`` When using comparison operators, the expression must be enclosed in quotes to avoid a YAML render error. With :mod:`ebuild <salt.modules.ebuild>` is also possible to specify a use flag list and/or if the given packages should be in package.accept_keywords file and/or the overlay from which you want the package to be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo: '~' - bar: '~>=1.2:slot::overlay[use,-otheruse]' - baz :param list sources: A list of packages to install, along with the source URI or local path from which to install each package. In the example below, ``foo``, ``bar``, ``baz``, etc. refer to the name of the package, as it would appear in the output of the ``pkg.version`` or ``pkg.list_pkgs`` salt CLI commands. .. code-block:: yaml mypkgs: pkg.installed: - sources: - foo: salt://rpms/foo.rpm - bar: http://somesite.org/bar.rpm - baz: ftp://someothersite.org/baz.rpm - qux: /minion/path/to/qux.rpm **PLATFORM-SPECIFIC ARGUMENTS** These are specific to each OS. If it does not apply to the execution module for your OS, it is ignored. :param bool hold: Force the package to be held at the current installed version. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2014.7.0 :param bool update_holds: If ``True``, and this function would update the package version, any packages which are being held will be temporarily unheld so that they can be updated. 
Otherwise, if this function attempts to update a held package, the held package(s) will be skipped and the state will fail. By default, this parameter is set to ``False``. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2016.11.0 :param list names: A list of packages to install from a software repository. Each package will be installed individually by the package manager. .. warning:: Unlike ``pkgs``, the ``names`` parameter cannot specify a version. In addition, it makes a separate call to the package management frontend to install each package, whereas ``pkgs`` makes just a single call. It is therefore recommended to use ``pkgs`` instead of ``names`` to install multiple packages, both for the additional features and the performance improvement that it brings. :param bool install_recommends: Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - install_recommends: False :param bool only_upgrade: Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. :param bool report_reboot_exit_codes: If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. 
code-block:: yaml ms vcpp installed: pkg.installed: - name: ms-vcpp - version: 10.0.40219 - report_reboot_exit_codes: False :param str bypass_file: If you wish to bypass the full package validation process, you can specify a file related to the installed package as a way to validate the package has already been installed. A good example would be a config file that is deployed with the package. Another bypass_file could be ``/run/salt-minion.pid``. .. code-block:: yaml install_ntp: pkg.installed: - name: ntp - bypass_file: /etc/ntp.conf The use case for this feature is when running salt at significant scale. Each state that has a requisite for a ``pkg.installed`` will have salt querying the package manager of the system. Compared to simple diff checks, querying the package manager is a lengthy process. This feature is an attempt to reduce the run time of states. If only a config change is being made but you wish to keep all of the self resolving requisites this bypasses the lengthy cost of the package manager. The assumption is that if this file is present, the package should already be installed. :param str bypass_file_contains: This option can only be used in conjunction with the ``bypass_file`` option. It is to provide a second layer of validation before bypassing the ``pkg.installed`` process. .. code-block:: yaml install_ntp: pkg.installed: - name: ntp - bypass_file: /etc/ntp.conf - bypass_file_contains: version-20181218 This will have salt check to see if the file contains the specified string. If the value is found, the ``pkg.installed`` process will be bypassed under the assumption that two pieces of validation have passed and the package is already installed. .. warning:: Do not try and use ``{{ salt['pkg.version']('ntp') }}`` in a jinja template as part of your bypass_file_contains match. This will trigger a ``pkg.version`` lookup with the package manager and negate any time saved by trying to use the bypass feature. 
:return: A dictionary containing the state of the software installation :rtype dict: .. note:: The ``pkg.installed`` state supports the usage of ``reload_modules``. This functionality allows you to force Salt to reload all modules. In many cases, Salt is clever enough to transparently reload the modules. For example, if you install a package, Salt reloads modules because some other module or state might require the package which was installed. However, there are some edge cases where this may not be the case, which is what ``reload_modules`` is meant to resolve. You should only use ``reload_modules`` if your ``pkg.installed`` does some sort of installation where if you do not reload the modules future items in your state which rely on the software being installed will fail. Please see the :ref:`Reloading Modules <reloading-modules>` documentation for more information. ''' if not pkgs and isinstance(pkgs, list): return {'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided'} # If just a name (and optionally a version) is passed, just pack them into # the pkgs argument. 
if name and not any((pkgs, sources)): if version: pkgs = [{name: version}] version = None else: pkgs = [name] kwargs['saltenv'] = __env__ refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if bypass_file is not None and bypass_file_contains is not None: if os.path.isfile(bypass_file): with salt.utils.fopen(bypass_file) as bypass_file_open: if bypass_file_contains in bypass_file_open.read(): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as {} was present in {}'.format(bypass_file_contains, bypass_file)} if bypass_file is not None and bypass_file_contains is None: if os.path.isfile(bypass_file): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as bypass_file {} was present'.format(bypass_file)} # check if capabilities should be checked and modify the requested packages # accordingly. if pkgs: pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) if not isinstance(pkg_verify, list): pkg_verify = pkg_verify is True if (pkg_verify or isinstance(pkg_verify, list)) \ and 'pkg.verify' not in __salt__: return {'name': name, 'changes': {}, 'result': False, 'comment': 'pkg.verify not implemented'} if not isinstance(version, six.string_types) and version is not None: version = six.text_type(version) kwargs['allow_updates'] = allow_updates result = _find_install_targets(name, version, pkgs, sources, fromrepo=fromrepo, skip_suggestions=skip_suggestions, pkg_verify=pkg_verify, normalize=normalize, ignore_epoch=ignore_epoch, reinstall=reinstall, refresh=refresh, **kwargs) try: (desired, targets, to_unpurge, to_reinstall, altered_files, warnings, was_refreshed) = result if was_refreshed: refresh = False except ValueError: # _find_install_targets() found no targets or encountered an error # check that the hold function is available if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( 
name=name, pkgs=pkgs, sources=sources ) except (CommandExecutionError, SaltInvocationError) as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': six.text_type(exc)} if 'result' in hold_ret and not hold_ret['result']: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] for i in modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] result['changes'][i['name']] = i['changes'] for i in not_modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] for i in failed_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] return result if to_unpurge and 'lowpkg.unpurge' not in __salt__: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'lowpkg.unpurge not implemented'} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret # Remove any targets not returned by _find_install_targets if pkgs: pkgs = [dict([(x, y)]) for x, y in six.iteritems(targets)] pkgs.extend([dict([(x, y)]) for x, y in six.iteritems(to_reinstall)]) elif sources: oldsources = sources sources = [x for x in oldsources if next(iter(list(x.keys()))) in targets] sources.extend([x for x in oldsources if next(iter(list(x.keys()))) in to_reinstall]) comment = [] if __opts__['test']: if targets: if sources: summary = ', '.join(targets) else: summary = ', '.join([_get_desired_pkg(x, targets) for x in targets]) comment.append('The following packages would be ' 'installed/updated: {0}'.format(summary)) if to_unpurge: comment.append( 'The following packages would have their selection 
status ' 'changed from \'purge\' to \'install\': {0}' .format(', '.join(to_unpurge)) ) if to_reinstall: # Add a comment for each package in to_reinstall with its # pkg.verify output if reinstall: reinstall_targets = [] for reinstall_pkg in to_reinstall: if sources: reinstall_targets.append(reinstall_pkg) else: reinstall_targets.append( _get_desired_pkg(reinstall_pkg, to_reinstall) ) msg = 'The following packages would be reinstalled: ' msg += ', '.join(reinstall_targets) comment.append(msg) else: for reinstall_pkg in to_reinstall: if sources: pkgstr = reinstall_pkg else: pkgstr = _get_desired_pkg(reinstall_pkg, to_reinstall) comment.append( 'Package \'{0}\' would be reinstalled because the ' 'following files have been altered:'.format(pkgstr) ) comment.append( _nested_output(altered_files[reinstall_pkg]) ) ret = {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret changes = {'installed': {}} modified_hold = None not_modified_hold = None failed_hold = None if targets or to_reinstall: try: pkg_ret = __salt__['pkg.install'](name=None, refresh=refresh, version=version, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=pkgs, sources=sources, reinstall=bool(to_reinstall), normalize=normalize, update_holds=update_holds, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while installing ' 'package(s): {0}'.format(exc)) if warnings: ret.setdefault('warnings', []).extend(warnings) return ret if refresh: refresh = False if isinstance(pkg_ret, dict): changes['installed'].update(pkg_ret) elif isinstance(pkg_ret, six.string_types): comment.append(pkg_ret) # Code below will be looking for a dictionary. If this is a string # it means that there was an exception raised and that no packages # changed, so now that we have added this error to the comments we # set this to an empty dictionary so that the code below which # checks reinstall targets works. pkg_ret = {} if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( name=name, pkgs=desired ) except (CommandExecutionError, SaltInvocationError) as exc: comment.append(six.text_type(exc)) ret = {'name': name, 'changes': changes, 'result': False, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: if 'result' in hold_ret and not hold_ret['result']: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] if to_unpurge: changes['purge_desired'] = __salt__['lowpkg.unpurge'](*to_unpurge) # Analyze pkg.install results for packages in targets if sources: modified = [x for x in changes['installed'] if x in targets] not_modified = [x for x in desired if x not in targets 
and x not in to_reinstall] failed = [x for x in targets if x not in modified] else: if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True new_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__: new_caps = __salt__['pkg.list_provides'](**kwargs) else: new_caps = {} ok, failed = _verify_install(desired, new_pkgs, ignore_epoch=ignore_epoch, new_caps=new_caps) modified = [x for x in ok if x in targets] not_modified = [x for x in ok if x not in targets and x not in to_reinstall] failed = [x for x in failed if x in targets] # If there was nothing unpurged, just set the changes dict to the contents # of changes['installed']. if not changes.get('purge_desired'): changes = changes['installed'] if modified: if sources: summary = ', '.join(modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in modified]) if len(summary) < 20: comment.append('The following packages were installed/updated: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} installed/updated.'.format( len(modified), 's' if len(modified) > 1 else '', 'were' if len(modified) > 1 else 'was' ) ) if modified_hold: for i in modified_hold: change_name = i['name'] if change_name in changes: comment.append(i['comment']) if changes[change_name]['new']: changes[change_name]['new'] += '\n' changes[change_name]['new'] += '{0}'.format(i['changes']['new']) if changes[change_name]['old']: changes[change_name]['old'] += '\n' changes[change_name]['old'] += '{0}'.format(i['changes']['old']) else: comment.append(i['comment']) changes[change_name] = {} changes[change_name]['new'] = '{0}'.format(i['changes']['new']) # Any requested packages that were not targeted for install or reinstall if not_modified: if sources: summary = ', '.join(not_modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in not_modified]) if len(not_modified) <= 20: comment.append('The following 
packages were already installed: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} already installed'.format( len(not_modified), 's' if len(not_modified) > 1 else '', 'were' if len(not_modified) > 1 else 'was' ) ) if not_modified_hold: for i in not_modified_hold: comment.append(i['comment']) result = True if failed: if sources: summary = ', '.join(failed) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in failed]) comment.insert(0, 'The following packages failed to ' 'install/update: {0}'.format(summary)) result = False if failed_hold: for i in failed_hold: comment.append(i['comment']) result = False # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] # Rerun pkg.verify for packages in to_reinstall to determine failed modified = [] failed = [] for reinstall_pkg in to_reinstall: if reinstall: if reinstall_pkg in pkg_ret: modified.append(reinstall_pkg) else: failed.append(reinstall_pkg) elif pkg_verify: # No need to wrap this in a try/except because we would already # have caught invalid arguments earlier. 
            verify_result = __salt__['pkg.verify'](
                reinstall_pkg,
                ignore_types=ignore_types,
                verify_options=verify_options,
                **kwargs)
            if verify_result:
                failed.append(reinstall_pkg)
                altered_files[reinstall_pkg] = verify_result
            else:
                modified.append(reinstall_pkg)

    if modified:
        # Add a comment for each package in modified with its pkg.verify output
        for modified_pkg in modified:
            if sources:
                pkgstr = modified_pkg
            else:
                pkgstr = _get_desired_pkg(modified_pkg, desired)
            msg = 'Package {0} was reinstalled.'.format(pkgstr)
            if modified_pkg in altered_files:
                msg += ' The following files were remediated:'
                comment.append(msg)
                comment.append(_nested_output(altered_files[modified_pkg]))
            else:
                comment.append(msg)

    if failed:
        # Add a comment for each package in failed with its pkg.verify output
        for failed_pkg in failed:
            if sources:
                pkgstr = failed_pkg
            else:
                pkgstr = _get_desired_pkg(failed_pkg, desired)
            msg = ('Reinstall was not successful for package {0}.'
                   .format(pkgstr))
            if failed_pkg in altered_files:
                msg += ' The following files could not be remediated:'
                comment.append(msg)
                comment.append(_nested_output(altered_files[failed_pkg]))
            else:
                comment.append(msg)
        # Any failed reinstall marks the whole state as failed.
        result = False

    ret = {'name': name,
           'changes': changes,
           'result': result,
           'comment': '\n'.join(comment)}
    if warnings:
        ret.setdefault('warnings', []).extend(warnings)
    return ret


def downloaded(name,
               version=None,
               pkgs=None,
               fromrepo=None,
               ignore_epoch=None,
               **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that the package is downloaded, and that it is the correct version
    (if specified).

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    :param str name:
        The name of the package to be downloaded. This parameter is ignored
        if "pkgs" is used. Additionally, please note that this option can
        only be used to download packages from a software repository.

    :param str version:
        Download a specific version of a package.

        .. important::
            As of version 2015.8.7, for distros which use yum/dnf, packages
            which have a version with a nonzero epoch (that is, versions
            which start with a number followed by a colon) must have the
            epoch included when specifying the version number. For example:

            .. code-block:: yaml

                vim-enhanced:
                  pkg.downloaded:
                    - version: 2:7.4.160-1.el7

            An **ignore_epoch** argument has been added which causes the
            epoch to be disregarded when the state checks to see if the
            desired version was installed.

            You can install a specific version when using the ``pkgs``
            argument by including the version after the package:

            .. code-block:: yaml

                common_packages:
                  pkg.downloaded:
                    - pkgs:
                      - unzip
                      - dos2unix
                      - salt-minion: 2015.8.5-1.el6

    :param bool resolve_capabilities:
        Turn on resolving capabilities. This allows one to name "provides" or
        alias names for packages.

        .. versionadded:: 2018.3.0

    CLI Example:

    .. code-block:: yaml

        zsh:
          pkg.downloaded:
            - version: 5.0.5-4.63
            - fromrepo: "myrepository"
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # Bail out early when the minion's pkg module cannot report downloads.
    if 'pkg.list_downloaded' not in __salt__:
        ret['result'] = False
        ret['comment'] = 'The pkg.downloaded state is not available on ' \
                         'this platform'
        return ret

    # An explicitly empty pkgs list means there is nothing to do.
    if not pkgs and isinstance(pkgs, list):
        ret['result'] = True
        ret['comment'] = 'No packages to download provided'
        return ret

    # If just a name (and optionally a version) is passed, just pack them into
    # the pkgs argument.
    if name and not pkgs:
        if version:
            pkgs = [{name: version}]
            version = None
        else:
            pkgs = [name]

    # It doesn't make sense here to receive 'downloadonly' as kwargs
    # as we're explicitly passing 'downloadonly=True' to execution module.
    if 'downloadonly' in kwargs:
        del kwargs['downloadonly']

    pkgs, _refresh = _resolve_capabilities(pkgs, **kwargs)

    # Only downloading not yet downloaded packages
    targets = _find_download_targets(name,
                                     version,
                                     pkgs,
                                     fromrepo=fromrepo,
                                     ignore_epoch=ignore_epoch,
                                     **kwargs)
    if isinstance(targets, dict) and 'result' in targets:
        # _find_download_targets() returned a complete state result dict;
        # pass it straight through.
        return targets
    elif not isinstance(targets, dict):
        ret['result'] = False
        ret['comment'] = 'An error was encountered while checking targets: ' \
                         '{0}'.format(targets)
        return ret

    if __opts__['test']:
        summary = ', '.join(targets)
        ret['comment'] = 'The following packages would be ' \
                         'downloaded: {0}'.format(summary)
        return ret

    try:
        pkg_ret = __salt__['pkg.install'](name=name,
                                          pkgs=pkgs,
                                          version=version,
                                          downloadonly=True,
                                          fromrepo=fromrepo,
                                          ignore_epoch=ignore_epoch,
                                          **kwargs)
        ret['result'] = True
        ret['changes'].update(pkg_ret)
    except CommandExecutionError as exc:
        ret = {'name': name, 'result': False}
        if exc.info:
            # Get information for state return from the exception.
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = 'An error was encountered while downloading ' \
                             'package(s): {0}'.format(exc)
        return ret

    # Verify that every targeted package actually landed in the download
    # cache reported by pkg.list_downloaded.
    new_pkgs = __salt__['pkg.list_downloaded'](**kwargs)
    ok, failed = _verify_install(targets, new_pkgs, ignore_epoch=ignore_epoch)

    if failed:
        summary = ', '.join([_get_desired_pkg(x, targets)
                             for x in failed])
        ret['result'] = False
        ret['comment'] = 'The following packages failed to ' \
                         'download: {0}'.format(summary)

    if not ret['changes'] and not ret['comment']:
        ret['result'] = True
        ret['comment'] = 'Packages are already downloaded: ' \
                         '{0}'.format(', '.join(targets))

    return ret


def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that packages related to certain advisory ids are installed.
    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    CLI Example:

    .. code-block:: yaml

        issue-foo-fixed:
          pkg.patch_installed:
            - advisory_ids:
              - SUSE-SLE-SERVER-12-SP2-2017-185
              - SUSE-SLE-SERVER-12-SP2-2017-150
              - SUSE-SLE-SERVER-12-SP2-2017-120
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # Bail out early when the minion's pkg module has no patch support.
    if 'pkg.list_patches' not in __salt__:
        ret['result'] = False
        ret['comment'] = 'The pkg.patch_installed state is not available on ' \
                         'this platform'
        return ret

    # An explicitly empty advisory list means there is nothing to do.
    if not advisory_ids and isinstance(advisory_ids, list):
        ret['result'] = True
        ret['comment'] = 'No advisory ids provided'
        return ret

    # Only downloading not yet downloaded packages
    targets = _find_advisory_targets(name, advisory_ids, **kwargs)
    if isinstance(targets, dict) and 'result' in targets:
        # _find_advisory_targets() returned a complete state result dict;
        # pass it straight through.
        return targets
    elif not isinstance(targets, list):
        ret['result'] = False
        ret['comment'] = 'An error was encountered while checking targets: ' \
                         '{0}'.format(targets)
        return ret

    if __opts__['test']:
        summary = ', '.join(targets)
        ret['comment'] = 'The following advisory patches would be ' \
                         'downloaded: {0}'.format(summary)
        return ret

    try:
        pkg_ret = __salt__['pkg.install'](name=name,
                                          advisory_ids=advisory_ids,
                                          downloadonly=downloadonly,
                                          **kwargs)
        ret['result'] = True
        ret['changes'].update(pkg_ret)
    except CommandExecutionError as exc:
        ret = {'name': name, 'result': False}
        if exc.info:
            # Get information for state return from the exception.
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = ('An error was encountered while downloading '
                              'package(s): {0}'.format(exc))
        return ret

    if not ret['changes'] and not ret['comment']:
        status = 'downloaded' if downloadonly else 'installed'
        ret['result'] = True
        ret['comment'] = ('Advisory patch is not needed or related packages '
                          'are already {0}'.format(status))

    return ret


def patch_downloaded(name, advisory_ids=None, **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that packages related to certain advisory ids are downloaded.

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    CLI Example:

    .. code-block:: yaml

        preparing-to-fix-issues:
          pkg.patch_downloaded:
            - advisory_ids:
              - SUSE-SLE-SERVER-12-SP2-2017-185
              - SUSE-SLE-SERVER-12-SP2-2017-150
              - SUSE-SLE-SERVER-12-SP2-2017-120
    '''
    if 'pkg.list_patches' not in __salt__:
        return {'name': name,
                'result': False,
                'changes': {},
                'comment': 'The pkg.patch_downloaded state is not available on '
                           'this platform'}

    # It doesn't make sense here to receive 'downloadonly' as kwargs
    # as we're explicitly passing 'downloadonly=True' to execution module.
    if 'downloadonly' in kwargs:
        del kwargs['downloadonly']
    # Delegate to patch_installed with downloadonly forced on.
    return patch_installed(name=name, advisory_ids=advisory_ids, downloadonly=True, **kwargs)


def latest(
        name,
        refresh=None,
        fromrepo=None,
        skip_verify=False,
        pkgs=None,
        watch_flags=True,
        **kwargs):
    '''
    Ensure that the named package is installed and the latest available
    package. If the package can be updated, this state function will update
    the package. Generally it is better for the
    :mod:`installed <salt.states.pkg.installed>` function to be used, as
    :mod:`latest <salt.states.pkg.latest>` will update the package whenever a
    new package is available.

    name
        The name of the package to maintain at the latest available version.
        This parameter is ignored if "pkgs" is used.
fromrepo Specify a repository from which to install skip_verify Skip the GPG verification check for the package to be installed refresh This parameter controls whether or not the package repo database is updated prior to checking for the latest available version of the requested packages. If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before checking for the latest available version of the requested packages. If ``False``, the package database will *not* be refreshed before checking. If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.latest: - refresh: True - cache_valid_time: 300 In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 Multiple Package Installation Options: (Not yet supported for: FreeBSD, OpenBSD, MacOS, and Solaris pkgutil) pkgs A list of packages to maintain at the latest available version. .. 
code-block:: yaml mypkgs: pkg.latest: - pkgs: - foo - bar - baz install_recommends Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - install_recommends: False only_upgrade Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. report_reboot_exit_codes If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. code-block:: yaml ms vcpp installed: pkg.latest: - name: ms-vcpp - report_reboot_exit_codes: False ''' refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if kwargs.get('sources'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'The "sources" parameter is not supported.'} elif pkgs: desired_pkgs = list(_repack_pkgs(pkgs).keys()) if not desired_pkgs: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted "pkgs" parameter. See ' 'minion log.'} else: if not pkgs and isinstance(pkgs, list): return { 'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided' } else: desired_pkgs = [name] kwargs['saltenv'] = __env__ # check if capabilities should be checked and modify the requested packages # accordingly. 
desired_pkgs, refresh = _resolve_capabilities(desired_pkgs, refresh=refresh, **kwargs) try: avail = __salt__['pkg.latest_version'](*desired_pkgs, fromrepo=fromrepo, refresh=refresh, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking the ' 'newest available version of package(s): {0}' .format(exc)} try: cur = __salt__['pkg.version'](*desired_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': exc.strerror} # Repack the cur/avail data if only a single package is being checked if isinstance(cur, six.string_types): cur = {desired_pkgs[0]: cur} if isinstance(avail, six.string_types): avail = {desired_pkgs[0]: avail} targets = {} problems = [] for pkg in desired_pkgs: if not avail.get(pkg): # Package either a) is up-to-date, or b) does not exist if not cur.get(pkg): # Package does not exist msg = 'No information found for \'{0}\'.'.format(pkg) log.error(msg) problems.append(msg) elif watch_flags \ and __grains__.get('os_family') == 'Gentoo' \ and __salt__['portage_config.is_changed_uses'](pkg): # Package is up-to-date, but Gentoo USE flags are changing so # we need to add it to the targets targets[pkg] = cur[pkg] else: # Package either a) is not installed, or b) is installed and has an # upgrade available targets[pkg] = avail[pkg] if problems: return { 'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems) } if targets: # Find up-to-date packages if not pkgs: # There couldn't have been any up-to-date packages if this state # only targeted a single package and is being allowed to proceed to # the install step. 
up_to_date = [] else: up_to_date = [x for x in pkgs if x not in targets] if __opts__['test']: comments = [] comments.append( 'The following packages would be installed/upgraded: ' + ', '.join(sorted(targets)) ) if up_to_date: up_to_date_count = len(up_to_date) if up_to_date_count <= 10: comments.append( 'The following packages are already up-to-date: ' + ', '.join( ['{0} ({1})'.format(x, cur[x]) for x in sorted(up_to_date)] ) ) else: comments.append( '{0} packages are already up-to-date' .format(up_to_date_count) ) return {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comments)} if salt.utils.platform.is_windows(): # pkg.install execution module on windows ensures the software # package is installed when no version is specified, it does not # upgrade the software to the latest. This is per the design. # Build updated list of pkgs *with verion number*, exclude # non-targeted ones targeted_pkgs = [{x: targets[x]} for x in targets] else: # Build updated list of pkgs to exclude non-targeted ones targeted_pkgs = list(targets) # No need to refresh, if a refresh was necessary it would have been # performed above when pkg.latest_version was run. 
try: changes = __salt__['pkg.install'](name=None, refresh=False, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=targeted_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while installing ' 'package(s): {0}'.format(exc)} if changes: # Find failed and successful updates failed = [x for x in targets if not changes.get(x) or changes[x].get('new') != targets[x] and targets[x] != 'latest'] successful = [x for x in targets if x not in failed] comments = [] if failed: msg = 'The following packages failed to update: ' \ '{0}'.format(', '.join(sorted(failed))) comments.append(msg) if successful: msg = 'The following packages were successfully ' \ 'installed/upgraded: ' \ '{0}'.format(', '.join(sorted(successful))) comments.append(msg) if up_to_date: if len(up_to_date) <= 10: msg = 'The following packages were already up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: msg = '{0} packages were already up-to-date '.format( len(up_to_date)) comments.append(msg) return {'name': name, 'changes': changes, 'result': False if failed else True, 'comment': ' '.join(comments)} else: if len(targets) > 10: comment = ('{0} targeted packages failed to update. ' 'See debug log for details.'.format(len(targets))) elif len(targets) > 1: comment = ('The following targeted packages failed to update. ' 'See debug log for details: ({0}).' 
.format(', '.join(sorted(targets)))) else: comment = 'Package {0} failed to ' \ 'update.'.format(next(iter(list(targets.keys())))) if up_to_date: if len(up_to_date) <= 10: comment += ' The following packages were already ' \ 'up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: comment += '{0} packages were already ' \ 'up-to-date'.format(len(up_to_date)) return {'name': name, 'changes': changes, 'result': False, 'comment': comment} else: if len(desired_pkgs) > 10: comment = 'All {0} packages are up-to-date.'.format( len(desired_pkgs)) elif len(desired_pkgs) > 1: comment = 'All packages are up-to-date ' \ '({0}).'.format(', '.join(sorted(desired_pkgs))) else: comment = 'Package {0} is already ' \ 'up-to-date'.format(desired_pkgs[0]) return {'name': name, 'changes': {}, 'result': True, 'comment': comment} def _uninstall( action='remove', name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Common function for package removal ''' if action not in ('remove', 'purge'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalid action \'{0}\'. 
' 'This is probably a bug.'.format(action)} try: pkg_params = __salt__['pkg_resource.parse_targets']( name, pkgs, normalize=normalize)[0] except MinionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while parsing targets: ' '{0}'.format(exc)} targets = _find_remove_targets(name, version, pkgs, normalize, ignore_epoch=ignore_epoch, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, list): return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking targets: ' '{0}'.format(targets)} if action == 'purge': old_removed = __salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) targets.extend([x for x in pkg_params if x in old_removed]) targets.sort() if not targets: return {'name': name, 'changes': {}, 'result': True, 'comment': 'None of the targeted packages are installed' '{0}'.format(' or partially installed' if action == 'purge' else '')} if __opts__['test']: return {'name': name, 'changes': {}, 'result': None, 'comment': 'The following packages will be {0}d: ' '{1}.'.format(action, ', '.join(targets))} changes = __salt__['pkg.{0}'.format(action)](name, pkgs=pkgs, version=version, **kwargs) new = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) failed = [] for x in pkg_params: if __grains__['os_family'] in ['Suse', 'RedHat']: # Check if the package version set to be removed is actually removed: if x in new and not pkg_params[x]: failed.append(x) elif x in new and pkg_params[x] in new[x]: failed.append(x + "-" + pkg_params[x]) elif x in new: failed.append(x) if action == 'purge': new_removed = __salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) failed.extend([x for x in pkg_params if x in new_removed]) failed.sort() if failed: return {'name': name, 'changes': changes, 'result': False, 'comment': 'The following packages failed to {0}: ' '{1}.'.format(action, ', 
'.join(failed))} comments = [] not_installed = sorted([x for x in pkg_params if x not in targets]) if not_installed: comments.append('The following packages were not installed: ' '{0}'.format(', '.join(not_installed))) comments.append('The following packages were {0}d: ' '{1}.'.format(action, ', '.join(targets))) else: comments.append('All targeted packages were {0}d.'.format(action)) return {'name': name, 'changes': changes, 'result': True, 'comment': ' '.join(comments)} def removed(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.remove`` if necessary to remove the package. name The name of the package to be removed. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.removed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to remove. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='remove', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while removing ' 'package(s): {0}'.format(exc)) return ret def purged(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.purge`` if necessary to purge the package. 
All configuration files are also removed. name The name of the package to be purged. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. 
code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.purged: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to purge. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='purge', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while purging ' 'package(s): {0}'.format(exc)) return ret def group_installed(name, skip=None, include=None, **kwargs): ''' .. versionadded:: 2015.8.0 .. versionchanged:: 2016.11.0 Added support in :mod:`pacman <salt.modules.pacman>` Ensure that an entire package group is installed. This state is currently only supported for the :mod:`yum <salt.modules.yumpkg>` and :mod:`pacman <salt.modules.pacman>` package managers. skip Packages that would normally be installed by the package group ("default" packages), which should not be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - skip: - piranha include Packages which are included in a group, which would not normally be installed by a ``yum groupinstall`` ("optional" packages). 
Note that this will not enforce group membership; if you include packages which are not members of the specified groups, they will still be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - include: - haproxy .. versionchanged:: 2016.3.0 This option can no longer be passed as a comma-separated list, it must now be passed as a list (as shown in the above example). .. note:: Because this is essentially a wrapper around :py:func:`pkg.install <salt.modules.yumpkg.install>`, any argument which can be passed to pkg.install may also be included here, and it will be passed on to the call to :py:func:`pkg.install <salt.modules.yumpkg.install>`. ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} if 'pkg.group_diff' not in __salt__: ret['comment'] = 'pkg.group_install not available for this platform' return ret if skip is None: skip = [] else: if not isinstance(skip, list): ret['comment'] = 'skip must be formatted as a list' return ret for idx, item in enumerate(skip): if not isinstance(item, six.string_types): skip[idx] = six.text_type(item) if include is None: include = [] else: if not isinstance(include, list): ret['comment'] = 'include must be formatted as a list' return ret for idx, item in enumerate(include): if not isinstance(item, six.string_types): include[idx] = six.text_type(item) try: diff = __salt__['pkg.group_diff'](name) except CommandExecutionError as err: ret['comment'] = ('An error was encountered while installing/updating ' 'group \'{0}\': {1}.'.format(name, err)) return ret mandatory = diff['mandatory']['installed'] + \ diff['mandatory']['not installed'] invalid_skip = [x for x in mandatory if x in skip] if invalid_skip: ret['comment'] = ( 'The following mandatory packages cannot be skipped: {0}' .format(', '.join(invalid_skip)) ) return ret targets = diff['mandatory']['not installed'] targets.extend([x for x in diff['default']['not installed'] if x not in skip]) targets.extend(include) if not targets: ret['result'] 
= True ret['comment'] = 'Group \'{0}\' is already installed'.format(name) return ret partially_installed = diff['mandatory']['installed'] \ or diff['default']['installed'] \ or diff['optional']['installed'] if __opts__['test']: ret['result'] = None if partially_installed: ret['comment'] = ( 'Group \'{0}\' is partially installed and will be updated' .format(name) ) else: ret['comment'] = 'Group \'{0}\' will be installed'.format(name) return ret try: ret['changes'] = __salt__['pkg.install'](pkgs=targets, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while ' 'installing/updating group \'{0}\': {1}' .format(name, exc)) return ret failed = [x for x in targets if x not in __salt__['pkg.list_pkgs'](**kwargs)] if failed: ret['comment'] = ( 'Failed to install the following packages: {0}' .format(', '.join(failed)) ) return ret ret['result'] = True ret['comment'] = 'Group \'{0}\' was {1}'.format( name, 'updated' if partially_installed else 'installed' ) return ret def mod_init(low): ''' Set a flag to tell the install functions to refresh the package database. This ensures that the package database is refreshed only once during a state run significantly improving the speed of package management during a state run. It sets a flag for a number of reasons, primarily due to timeline logic. When originally setting up the mod_init for pkg a number of corner cases arose with different package managers and how they refresh package data. It also runs the "ex_mod_init" from the package manager module that is currently loaded. The "ex_mod_init" is expected to work as a normal "mod_init" function. .. 
seealso:: :py:func:`salt.modules.ebuild.ex_mod_init` ''' ret = True if 'pkg.ex_mod_init' in __salt__: ret = __salt__['pkg.ex_mod_init'](low) if low['fun'] == 'installed' or low['fun'] == 'latest': salt.utils.pkg.write_rtag(__opts__) return ret return False def mod_aggregate(low, chunks, running): ''' The mod_aggregate function which looks up all packages in the available low chunks and merges them into a single pkgs ref in the present low data ''' pkgs = [] pkg_type = None agg_enabled = [ 'installed', 'latest', 'removed', 'purged', ] if low.get('fun') not in agg_enabled: return low for chunk in chunks: tag = __utils__['state.gen_tag'](chunk) if tag in running: # Already ran the pkg state, skip aggregation continue if chunk.get('state') == 'pkg': if '__agg__' in chunk: continue # Check for the same function if chunk.get('fun') != low.get('fun'): continue # Check for the same repo if chunk.get('fromrepo') != low.get('fromrepo'): continue # Check first if 'sources' was passed so we don't aggregate pkgs # and sources together. if 'sources' in chunk: if pkg_type is None: pkg_type = 'sources' if pkg_type == 'sources': pkgs.extend(chunk['sources']) chunk['__agg__'] = True else: if pkg_type is None: pkg_type = 'pkgs' if pkg_type == 'pkgs': # Pull out the pkg names! if 'pkgs' in chunk: pkgs.extend(chunk['pkgs']) chunk['__agg__'] = True elif 'name' in chunk: version = chunk.pop('version', None) if version is not None: pkgs.append({chunk['name']: version}) else: pkgs.append(chunk['name']) chunk['__agg__'] = True if pkg_type is not None and pkgs: if pkg_type in low: low[pkg_type].extend(pkgs) else: low[pkg_type] = pkgs return low def mod_watch(name, **kwargs): ''' Install/reinstall a package based on a watch requisite .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered. 
''' sfun = kwargs.pop('sfun', None) mapfun = {'purged': purged, 'latest': latest, 'removed': removed, 'installed': installed} if sfun in mapfun: return mapfun[sfun](name, **kwargs) return {'name': name, 'changes': {}, 'comment': 'pkg.{0} does not work with the watch requisite'.format(sfun), 'result': False}
saltstack/salt
salt/states/pkg.py
group_installed
python
def group_installed(name, skip=None, include=None, **kwargs): ''' .. versionadded:: 2015.8.0 .. versionchanged:: 2016.11.0 Added support in :mod:`pacman <salt.modules.pacman>` Ensure that an entire package group is installed. This state is currently only supported for the :mod:`yum <salt.modules.yumpkg>` and :mod:`pacman <salt.modules.pacman>` package managers. skip Packages that would normally be installed by the package group ("default" packages), which should not be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - skip: - piranha include Packages which are included in a group, which would not normally be installed by a ``yum groupinstall`` ("optional" packages). Note that this will not enforce group membership; if you include packages which are not members of the specified groups, they will still be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - include: - haproxy .. versionchanged:: 2016.3.0 This option can no longer be passed as a comma-separated list, it must now be passed as a list (as shown in the above example). .. note:: Because this is essentially a wrapper around :py:func:`pkg.install <salt.modules.yumpkg.install>`, any argument which can be passed to pkg.install may also be included here, and it will be passed on to the call to :py:func:`pkg.install <salt.modules.yumpkg.install>`. 
''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} if 'pkg.group_diff' not in __salt__: ret['comment'] = 'pkg.group_install not available for this platform' return ret if skip is None: skip = [] else: if not isinstance(skip, list): ret['comment'] = 'skip must be formatted as a list' return ret for idx, item in enumerate(skip): if not isinstance(item, six.string_types): skip[idx] = six.text_type(item) if include is None: include = [] else: if not isinstance(include, list): ret['comment'] = 'include must be formatted as a list' return ret for idx, item in enumerate(include): if not isinstance(item, six.string_types): include[idx] = six.text_type(item) try: diff = __salt__['pkg.group_diff'](name) except CommandExecutionError as err: ret['comment'] = ('An error was encountered while installing/updating ' 'group \'{0}\': {1}.'.format(name, err)) return ret mandatory = diff['mandatory']['installed'] + \ diff['mandatory']['not installed'] invalid_skip = [x for x in mandatory if x in skip] if invalid_skip: ret['comment'] = ( 'The following mandatory packages cannot be skipped: {0}' .format(', '.join(invalid_skip)) ) return ret targets = diff['mandatory']['not installed'] targets.extend([x for x in diff['default']['not installed'] if x not in skip]) targets.extend(include) if not targets: ret['result'] = True ret['comment'] = 'Group \'{0}\' is already installed'.format(name) return ret partially_installed = diff['mandatory']['installed'] \ or diff['default']['installed'] \ or diff['optional']['installed'] if __opts__['test']: ret['result'] = None if partially_installed: ret['comment'] = ( 'Group \'{0}\' is partially installed and will be updated' .format(name) ) else: ret['comment'] = 'Group \'{0}\' will be installed'.format(name) return ret try: ret['changes'] = __salt__['pkg.install'](pkgs=targets, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while ' 'installing/updating group \'{0}\': {1}' .format(name, exc)) return ret failed = [x for x in targets if x not in __salt__['pkg.list_pkgs'](**kwargs)] if failed: ret['comment'] = ( 'Failed to install the following packages: {0}' .format(', '.join(failed)) ) return ret ret['result'] = True ret['comment'] = 'Group \'{0}\' was {1}'.format( name, 'updated' if partially_installed else 'installed' ) return ret
.. versionadded:: 2015.8.0 .. versionchanged:: 2016.11.0 Added support in :mod:`pacman <salt.modules.pacman>` Ensure that an entire package group is installed. This state is currently only supported for the :mod:`yum <salt.modules.yumpkg>` and :mod:`pacman <salt.modules.pacman>` package managers. skip Packages that would normally be installed by the package group ("default" packages), which should not be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - skip: - piranha include Packages which are included in a group, which would not normally be installed by a ``yum groupinstall`` ("optional" packages). Note that this will not enforce group membership; if you include packages which are not members of the specified groups, they will still be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - include: - haproxy .. versionchanged:: 2016.3.0 This option can no longer be passed as a comma-separated list, it must now be passed as a list (as shown in the above example). .. note:: Because this is essentially a wrapper around :py:func:`pkg.install <salt.modules.yumpkg.install>`, any argument which can be passed to pkg.install may also be included here, and it will be passed on to the call to :py:func:`pkg.install <salt.modules.yumpkg.install>`.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pkg.py#L3092-L3237
null
# -*- coding: utf-8 -*- ''' Installation of packages using OS package managers such as yum or apt-get ========================================================================= .. note:: On minions running systemd>=205, as of version 2015.8.12, 2016.3.3, and 2016.11.0, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing the package manager commands spawned by Salt, when Salt updates itself (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.use_scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Salt can manage software packages via the pkg state module, packages can be set up to be installed, latest, removed and purged. Package management declarations are typically rather simple: .. code-block:: yaml vim: pkg.installed A more involved example involves pulling from a custom repository. .. code-block:: yaml base: pkgrepo.managed: - humanname: Logstash PPA - name: ppa:wolfnet/logstash - dist: precise - file: /etc/apt/sources.list.d/logstash.list - keyid: 28B04E4A - keyserver: keyserver.ubuntu.com logstash: pkg.installed: - fromrepo: ppa:wolfnet/logstash Multiple packages can also be installed with the use of the pkgs state module .. code-block:: yaml dotdeb.repo: pkgrepo.managed: - humanname: Dotdeb - name: deb http://packages.dotdeb.org wheezy-php55 all - dist: wheezy-php55 - file: /etc/apt/sources.list.d/dotbeb.list - keyid: 89DF5277 - keyserver: keys.gnupg.net - refresh_db: true php.packages: pkg.installed: - fromrepo: wheezy-php55 - pkgs: - php5-fpm - php5-cli - php5-curl .. 
warning:: Package names are currently case-sensitive. If the minion is using a package manager which is not case-sensitive (such as :mod:`pkgng <salt.modules.pkgng>`), then this state will fail if the proper case is not used. This will be addressed in a future release of Salt. ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import fnmatch import logging import os import re # Import Salt libs import salt.utils.pkg import salt.utils.platform import salt.utils.versions from salt.output import nested from salt.utils.functools import namespaced_function as _namespaced_function from salt.utils.odict import OrderedDict as _OrderedDict from salt.exceptions import ( CommandExecutionError, MinionError, SaltInvocationError ) from salt.modules.pkg_resource import _repack_pkgs # Import 3rd-party libs from salt.ext import six # pylint: disable=invalid-name _repack_pkgs = _namespaced_function(_repack_pkgs, globals()) if salt.utils.platform.is_windows(): # pylint: disable=import-error,no-name-in-module,unused-import from salt.ext.six.moves.urllib.parse import urlparse as _urlparse from salt.exceptions import SaltRenderError import collections import datetime import errno import time from functools import cmp_to_key # pylint: disable=import-error # pylint: enable=unused-import from salt.modules.win_pkg import _get_package_info from salt.modules.win_pkg import get_repo_data from salt.modules.win_pkg import _get_repo_details from salt.modules.win_pkg import _refresh_db_conditional from salt.modules.win_pkg import refresh_db from salt.modules.win_pkg import genrepo from salt.modules.win_pkg import _repo_process_pkg_sls from salt.modules.win_pkg import _get_latest_pkg_version from salt.modules.win_pkg import _reverse_cmp_pkg_versions _get_package_info = _namespaced_function(_get_package_info, globals()) get_repo_data = _namespaced_function(get_repo_data, globals()) _get_repo_details = \ _namespaced_function(_get_repo_details, globals()) 
_refresh_db_conditional = \ _namespaced_function(_refresh_db_conditional, globals()) refresh_db = _namespaced_function(refresh_db, globals()) genrepo = _namespaced_function(genrepo, globals()) _repo_process_pkg_sls = \ _namespaced_function(_repo_process_pkg_sls, globals()) _get_latest_pkg_version = \ _namespaced_function(_get_latest_pkg_version, globals()) _reverse_cmp_pkg_versions = \ _namespaced_function(_reverse_cmp_pkg_versions, globals()) # The following imports are used by the namespaced win_pkg funcs # and need to be included in their globals. # pylint: disable=import-error,unused-import from salt.utils.versions import LooseVersion # pylint: enable=import-error,unused-import # pylint: enable=invalid-name log = logging.getLogger(__name__) def __virtual__(): ''' Only make these states available if a pkg provider has been detected or assigned for this minion ''' return 'pkg.install' in __salt__ def _get_comparison_spec(pkgver): ''' Return a tuple containing the comparison operator and the version. If no comparison operator was passed, the comparison is assumed to be an "equals" comparison, and "==" will be the operator returned. ''' oper, verstr = salt.utils.pkg.split_comparison(pkgver.strip()) if oper in ('=', ''): oper = '==' return oper, verstr def _parse_version_string(version_conditions_string): ''' Returns a list of two-tuples containing (operator, version). ''' result = [] version_conditions_string = version_conditions_string.strip() if not version_conditions_string: return result for version_condition in version_conditions_string.split(','): operator_and_version = _get_comparison_spec(version_condition) result.append(operator_and_version) return result def _fulfills_version_string(installed_versions, version_conditions_string, ignore_epoch=False, allow_updates=False): ''' Returns True if any of the installed versions match the specified version conditions, otherwise returns False. 
installed_versions The installed versions version_conditions_string The string containing all version conditions. E.G. 1.2.3-4 >=1.2.3-4 >=1.2.3-4, <2.3.4-5 >=1.2.3-4, <2.3.4-5, !=1.2.4-1 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. allow_updates : False Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. (Only applicable if only one strict version condition is specified E.G. version: 2.0.6~ubuntu3) ''' version_conditions = _parse_version_string(version_conditions_string) for installed_version in installed_versions: fullfills_all = True for operator, version_string in version_conditions: if allow_updates and len(version_conditions) == 1 and operator == '==': operator = '>=' fullfills_all = fullfills_all and _fulfills_version_spec([installed_version], operator, version_string, ignore_epoch=ignore_epoch) if fullfills_all: return True return False def _fulfills_version_spec(versions, oper, desired_version, ignore_epoch=False): ''' Returns True if any of the installed versions match the specified version, otherwise returns False ''' cmp_func = __salt__.get('pkg.version_cmp') # stripping "with_origin" dict wrapper if salt.utils.platform.is_freebsd(): if isinstance(versions, dict) and 'version' in versions: versions = versions['version'] for ver in versions: if (oper == '==' and fnmatch.fnmatch(ver, desired_version)) \ or salt.utils.versions.compare(ver1=ver, oper=oper, ver2=desired_version, cmp_func=cmp_func, ignore_epoch=ignore_epoch): return True return False def _find_unpurge_targets(desired, **kwargs): ''' Find packages which are marked to be purged but can't yet be removed because they are dependencies for 
other installed packages. These are the packages which will need to be 'unpurged' because they are part of pkg.installed states. This really just applies to Debian-based Linuxes. ''' return [ x for x in desired if x in __salt__['pkg.list_pkgs'](purge_desired=True, **kwargs) ] def _find_download_targets(name=None, version=None, pkgs=None, normalize=True, skip_suggestions=False, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.downloaded and discover what packages need to be downloaded. Return a dict of packages to download. ''' cur_pkgs = __salt__['pkg.list_downloaded'](**kwargs) if pkgs: to_download = _repack_pkgs(pkgs, normalize=normalize) if not to_download: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. See ' 'minion log.'} else: if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_download = {_normalize_name(name): version} else: to_download = {name: version} cver = cur_pkgs.get(name, {}) if name in to_download: # Package already downloaded, no need to download again if cver and version in cver: return {'name': name, 'changes': {}, 'result': True, 'comment': 'Version {0} of package \'{1}\' is already ' 'downloaded'.format(version, name)} # if cver is not an empty string, the package is already downloaded elif cver and version is None: # The package is downloaded return {'name': name, 'changes': {}, 'result': True, 'comment': 'Package {0} is already ' 'downloaded'.format(name)} version_spec = False if not skip_suggestions: try: problems = _preflight_check(to_download, **kwargs) except CommandExecutionError: pass else: comments = [] if problems.get('no_suggest'): comments.append( 'The following package(s) were not found, and no ' 'possible matches were found in the package db: ' '{0}'.format( ', '.join(sorted(problems['no_suggest'])) ) ) if problems.get('suggest'): for pkgname, suggestions in \ six.iteritems(problems['suggest']): 
comments.append( 'Package \'{0}\' not found (possible matches: ' '{1})'.format(pkgname, ', '.join(suggestions)) ) if comments: if len(comments) > 1: comments.append('') return {'name': name, 'changes': {}, 'result': False, 'comment': '. '.join(comments).rstrip()} # Find out which packages will be targeted in the call to pkg.download # Check current downloaded versions against specified versions targets = {} problems = [] for pkgname, pkgver in six.iteritems(to_download): cver = cur_pkgs.get(pkgname, {}) # Package not yet downloaded, so add to targets if not cver: targets[pkgname] = pkgver continue # No version specified but package is already downloaded elif cver and not pkgver: continue version_spec = True try: if not _fulfills_version_string(cver.keys(), pkgver, ignore_epoch=ignore_epoch): targets[pkgname] = pkgver except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already downloaded msg = ( 'All specified packages{0} are already downloaded' .format(' (matching specified versions)' if version_spec else '') ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_advisory_targets(name=None, advisory_ids=None, **kwargs): ''' Inspect the arguments to pkg.patch_installed and discover what advisory patches need to be installed. Return a dict of advisory patches to install. 
''' cur_patches = __salt__['pkg.list_installed_patches'](**kwargs) if advisory_ids: to_download = advisory_ids else: to_download = [name] if cur_patches.get(name, {}): # Advisory patch already installed, no need to install it again return {'name': name, 'changes': {}, 'result': True, 'comment': 'Advisory patch {0} is already ' 'installed'.format(name)} # Find out which advisory patches will be targeted in the call to pkg.install targets = [] for patch_name in to_download: cver = cur_patches.get(patch_name, {}) # Advisory patch not yet installed, so add to targets if not cver: targets.append(patch_name) continue if not targets: # All specified packages are already downloaded msg = ('All specified advisory patches are already installed') return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_remove_targets(name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.removed and discover what packages need to be removed. Return a dict of packages to remove. ''' if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if pkgs: to_remove = _repack_pkgs(pkgs, normalize=normalize) if not to_remove: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. 
See ' 'minion log.'} else: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_remove = {_normalize_name(name): version} version_spec = False # Find out which packages will be targeted in the call to pkg.remove # Check current versions against specified versions targets = [] problems = [] for pkgname, pkgver in six.iteritems(to_remove): # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names origin = bool(re.search('/', pkgname)) if __grains__['os'] == 'FreeBSD' and origin: cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == pkgname] else: cver = cur_pkgs.get(pkgname, []) # Package not installed, no need to remove if not cver: continue # No version specified and pkg is installed elif __salt__['pkg_resource.version_clean'](pkgver) is None: targets.append(pkgname) continue version_spec = True try: if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch): targets.append(pkgname) else: log.debug( 'Current version (%s) did not match desired version ' 'specification (%s), will not remove', cver, pkgver ) except CommandExecutionError as exc: problems.append(exc.strerror) continue if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not targets: # All specified packages are already absent msg = 'All specified packages{0} are already absent'.format( ' (matching specified versions)' if version_spec else '' ) return {'name': name, 'changes': {}, 'result': True, 'comment': msg} return targets def _find_install_targets(name=None, version=None, pkgs=None, sources=None, skip_suggestions=False, pkg_verify=False, normalize=True, ignore_epoch=False, reinstall=False, refresh=False, **kwargs): ''' Inspect the arguments to pkg.installed and discover what packages need to be installed. 
Return a dict of desired packages ''' was_refreshed = False if all((pkgs, sources)): return {'name': name, 'changes': {}, 'result': False, 'comment': 'Only one of "pkgs" and "sources" is permitted.'} # dict for packages that fail pkg.verify and their altered files altered_files = {} # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True if salt.utils.platform.is_windows(): # Windows requires a refresh to establish a pkg db if refresh=True, so # add it to the kwargs. 
kwargs['refresh'] = refresh resolve_capabilities = kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__ try: cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) cur_prov = resolve_capabilities and __salt__['pkg.list_provides'](**kwargs) or dict() except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': exc.strerror} if salt.utils.platform.is_windows() and kwargs.pop('refresh', False): # We already refreshed when we called pkg.list_pkgs was_refreshed = True refresh = False if any((pkgs, sources)): if pkgs: desired = _repack_pkgs(pkgs, normalize=normalize) elif sources: desired = __salt__['pkg_resource.pack_sources']( sources, normalize=normalize, ) if not desired: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted \'{0}\' parameter. See ' 'minion log.'.format('pkgs' if pkgs else 'sources')} to_unpurge = _find_unpurge_targets(desired, **kwargs) else: if salt.utils.platform.is_windows(): pkginfo = _get_package_info(name, saltenv=kwargs['saltenv']) if not pkginfo: return {'name': name, 'changes': {}, 'result': False, 'comment': 'Package {0} not found in the ' 'repository.'.format(name)} if version is None: version = _get_latest_pkg_version(pkginfo) if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) desired = {_normalize_name(name): version} else: desired = {name: version} to_unpurge = _find_unpurge_targets(desired, **kwargs) # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names origin = bool(re.search('/', name)) if __grains__['os'] == 'FreeBSD' and origin: cver = [k for k, v in six.iteritems(cur_pkgs) if v['origin'] == name] else: cver = cur_pkgs.get(name, []) if name not in to_unpurge: if version and version in cver \ and not reinstall \ and not pkg_verify: # The package is installed and is the correct version return {'name': name, 'changes': {}, 'result': True, 
'comment': 'Version {0} of package \'{1}\' is already ' 'installed'.format(version, name)} # if cver is not an empty string, the package is already installed elif cver and version is None \ and not reinstall \ and not pkg_verify: # The package is installed return {'name': name, 'changes': {}, 'result': True, 'comment': 'Package {0} is already ' 'installed'.format(name)} version_spec = False if not sources: # Check for alternate package names if strict processing is not # enforced. Takes extra time. Disable for improved performance if not skip_suggestions: # Perform platform-specific pre-flight checks not_installed = dict([ (name, version) for name, version in desired.items() if not (name in cur_pkgs and (version is None or _fulfills_version_string(cur_pkgs[name], version))) ]) if not_installed: try: problems = _preflight_check(not_installed, **kwargs) except CommandExecutionError: pass else: comments = [] if problems.get('no_suggest'): comments.append( 'The following package(s) were not found, and no ' 'possible matches were found in the package db: ' '{0}'.format( ', '.join(sorted(problems['no_suggest'])) ) ) if problems.get('suggest'): for pkgname, suggestions in \ six.iteritems(problems['suggest']): comments.append( 'Package \'{0}\' not found (possible matches: ' '{1})'.format(pkgname, ', '.join(suggestions)) ) if comments: if len(comments) > 1: comments.append('') return {'name': name, 'changes': {}, 'result': False, 'comment': '. '.join(comments).rstrip()} # Resolve the latest package version for any packages with "latest" in the # package version wants_latest = [] \ if sources \ else [x for x, y in six.iteritems(desired) if y == 'latest'] if wants_latest: resolved_latest = __salt__['pkg.latest_version'](*wants_latest, refresh=refresh, **kwargs) if len(wants_latest) == 1: resolved_latest = {wants_latest[0]: resolved_latest} if refresh: was_refreshed = True refresh = False # pkg.latest_version returns an empty string when the package is # up-to-date. 
So check the currently-installed packages. If found, the # resolved latest version will be the currently installed one from # cur_pkgs. If not found, then the package doesn't exist and the # resolved latest version will be None. for key in resolved_latest: if not resolved_latest[key]: if key in cur_pkgs: resolved_latest[key] = cur_pkgs[key][-1] else: resolved_latest[key] = None # Update the desired versions with the ones we resolved desired.update(resolved_latest) # Find out which packages will be targeted in the call to pkg.install targets = {} to_reinstall = {} problems = [] warnings = [] failed_verify = False for package_name, version_string in six.iteritems(desired): cver = cur_pkgs.get(package_name, []) if resolve_capabilities and not cver and package_name in cur_prov: cver = cur_pkgs.get(cur_prov.get(package_name)[0], []) # Package not yet installed, so add to targets if not cver: targets[package_name] = version_string continue if sources: if reinstall: to_reinstall[package_name] = version_string continue elif 'lowpkg.bin_pkg_info' not in __salt__: continue # Metadata parser is available, cache the file and derive the # package's name and version err = 'Unable to cache {0}: {1}' try: cached_path = __salt__['cp.cache_file'](version_string, saltenv=kwargs['saltenv']) except CommandExecutionError as exc: problems.append(err.format(version_string, exc)) continue if not cached_path: problems.append(err.format(version_string, 'file not found')) continue elif not os.path.exists(cached_path): problems.append('{0} does not exist on minion'.format(version_string)) continue source_info = __salt__['lowpkg.bin_pkg_info'](cached_path) if source_info is None: warnings.append('Failed to parse metadata for {0}'.format(version_string)) continue else: verstr = source_info['version'] else: verstr = version_string if reinstall: to_reinstall[package_name] = version_string continue if not __salt__['pkg_resource.check_extra_requirements'](package_name, version_string): 
targets[package_name] = version_string continue # No version specified and pkg is installed elif __salt__['pkg_resource.version_clean'](version_string) is None: if (not reinstall) and pkg_verify: try: verify_result = __salt__['pkg.verify']( package_name, ignore_types=ignore_types, verify_options=verify_options, **kwargs ) except (CommandExecutionError, SaltInvocationError) as exc: failed_verify = exc.strerror continue if verify_result: to_reinstall[package_name] = version_string altered_files[package_name] = verify_result continue version_fulfilled = False allow_updates = bool(not sources and kwargs.get('allow_updates')) try: version_fulfilled = _fulfills_version_string(cver, verstr, ignore_epoch=ignore_epoch, allow_updates=allow_updates) except CommandExecutionError as exc: problems.append(exc.strerror) continue # Compare desired version against installed version. version_spec = True if not version_fulfilled: if reinstall: to_reinstall[package_name] = version_string else: version_conditions = _parse_version_string(version_string) if pkg_verify and any(oper == '==' for oper, version in version_conditions): try: verify_result = __salt__['pkg.verify']( package_name, ignore_types=ignore_types, verify_options=verify_options, **kwargs ) except (CommandExecutionError, SaltInvocationError) as exc: failed_verify = exc.strerror continue if verify_result: to_reinstall[package_name] = version_string altered_files[package_name] = verify_result else: log.debug( 'Current version (%s) did not match desired version ' 'specification (%s), adding to installation targets', cver, version_string ) targets[package_name] = version_string if failed_verify: problems.append(failed_verify) if problems: return {'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems)} if not any((targets, to_unpurge, to_reinstall)): # All specified packages are installed msg = 'All specified packages are already installed{0}' msg = msg.format( ' and are at the desired version' if 
version_spec and not sources else '' ) ret = {'name': name, 'changes': {}, 'result': True, 'comment': msg} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret return (desired, targets, to_unpurge, to_reinstall, altered_files, warnings, was_refreshed) def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None): ''' Determine whether or not the installed packages match what was requested in the SLS file. ''' ok = [] failed = [] if not new_caps: new_caps = dict() for pkgname, pkgver in desired.items(): # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names. # Homebrew for Mac OSX does something similar with tap names # prefixing package names, separated with a slash. has_origin = '/' in pkgname if __grains__['os'] == 'FreeBSD' and has_origin: cver = [k for k, v in six.iteritems(new_pkgs) if v['origin'] == pkgname] elif __grains__['os'] == 'MacOS' and has_origin: cver = new_pkgs.get(pkgname, new_pkgs.get(pkgname.split('/')[-1])) elif __grains__['os'] == 'OpenBSD': cver = new_pkgs.get(pkgname.split('%')[0]) elif __grains__['os_family'] == 'Debian': cver = new_pkgs.get(pkgname.split('=')[0]) else: cver = new_pkgs.get(pkgname) if not cver and pkgname in new_caps: cver = new_pkgs.get(new_caps.get(pkgname)[0]) if not cver: failed.append(pkgname) continue elif pkgver == 'latest': ok.append(pkgname) continue elif not __salt__['pkg_resource.version_clean'](pkgver): ok.append(pkgname) continue elif pkgver.endswith("*") and cver[0].startswith(pkgver[:-1]): ok.append(pkgname) continue if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch): ok.append(pkgname) else: failed.append(pkgname) return ok, failed def _get_desired_pkg(name, desired): ''' Helper function that retrieves and nicely formats the desired pkg (and version if specified) so that helpful information can be printed in the comment for the state. 
''' if not desired[name] or desired[name].startswith(('<', '>', '=')): oper = '' else: oper = '=' return '{0}{1}{2}'.format(name, oper, '' if not desired[name] else desired[name]) def _preflight_check(desired, fromrepo, **kwargs): ''' Perform platform-specific checks on desired packages ''' if 'pkg.check_db' not in __salt__: return {} ret = {'suggest': {}, 'no_suggest': []} pkginfo = __salt__['pkg.check_db']( *list(desired.keys()), fromrepo=fromrepo, **kwargs ) for pkgname in pkginfo: if pkginfo[pkgname]['found'] is False: if pkginfo[pkgname]['suggestions']: ret['suggest'][pkgname] = pkginfo[pkgname]['suggestions'] else: ret['no_suggest'].append(pkgname) return ret def _nested_output(obj): ''' Serialize obj and format for output ''' nested.__opts__ = __opts__ ret = nested.output(obj).rstrip() return ret def _resolve_capabilities(pkgs, refresh=False, **kwargs): ''' Resolve capabilities in ``pkgs`` and exchange them with real package names, when the result is distinct. This feature can be turned on while setting the paramter ``resolve_capabilities`` to True. Return the input dictionary with replaced capability names and as second return value a bool which say if a refresh need to be run. In case of ``resolve_capabilities`` is False (disabled) or not supported by the implementation the input is returned unchanged. ''' if not pkgs or 'pkg.resolve_capabilities' not in __salt__: return pkgs, refresh ret = __salt__['pkg.resolve_capabilities'](pkgs, refresh=refresh, **kwargs) return ret, False def installed( name, version=None, refresh=None, fromrepo=None, skip_verify=False, skip_suggestions=False, pkgs=None, sources=None, allow_updates=False, pkg_verify=False, normalize=True, ignore_epoch=False, reinstall=False, update_holds=False, bypass_file=None, bypass_file_contains=None, **kwargs): ''' Ensure that the package is installed, and that it is the correct version (if specified). :param str name: The name of the package to be installed. 
This parameter is ignored if either "pkgs" or "sources" is used. Additionally, please note that this option can only be used to install packages from a software repository. To install a package file manually, use the "sources" option detailed below. :param str version: Install a specific version of a package. This option is ignored if "sources" is used. Currently, this option is supported for the following pkg providers: :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`pkgin <salt.modules.pkgin>`, :mod:`win_pkg <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`. The version number includes the release designation where applicable, to allow Salt to target a specific release of a given version. When in doubt, using the ``pkg.latest_version`` function for an uninstalled package will tell you the version available. .. code-block:: bash # salt myminion pkg.latest_version vim-enhanced myminion: 2:7.4.160-1.el7 .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the ``pkg.latest_version`` output above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. 
Also, while this function is not yet implemented for all pkg frontends, :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` will show all versions available in the various repositories for a given package, irrespective of whether or not it is installed. .. code-block:: bash # salt myminion pkg.list_repo_pkgs bash myminion: ---------- bash: - 4.2.46-21.el7_3 - 4.2.46-20.el7_2 This function was first added for :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` in 2014.1.0, and was expanded to :py:func:`Debian/Ubuntu <salt.modules.aptpkg.list_repo_pkgs>` and :py:func:`Arch Linux <salt.modules.pacman.list_repo_pkgs>`-based distros in the 2017.7.0 release. The version strings returned by either of these functions can be used as version specifiers in pkg states. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.installed: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 If the version given is the string ``latest``, the latest available package version will be installed à la ``pkg.latest``. **WILDCARD VERSIONS** As of the 2017.7.0 release, this state now supports wildcards in package versions for SUSE SLES/Leap/Tumbleweed, Debian/Ubuntu, RHEL/CentOS, Arch Linux, and their derivatives. Using wildcards can be useful for packages where the release name is built into the version in some way, such as for RHEL/CentOS which typically has version numbers like ``1.2.34-5.el7``. An example of the usage for this would be: .. code-block:: yaml mypkg: pkg.installed: - version: '1.2.34*' Keep in mind that using wildcard versions will result in a slower state run since Salt must gather the available versions of the specified packages and figure out which of them match the specified wildcard expression. :param bool refresh: This parameter controls whether or not the package repo database is updated prior to installing the requested package(s). 
If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before installing. If ``False``, the package database will *not* be refreshed before installing. If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - cache_valid_time: 300 - allow_updates: True - hold: False In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param str fromrepo: Specify a repository from which to install .. note:: Distros which use APT (Debian, Ubuntu, etc.) do not have a concept of repositories, in the same way as YUM-based distros do. When a source is added, it is assigned to a given release. Consider the following source configuration: .. code-block:: text deb http://ppa.launchpad.net/saltstack/salt/ubuntu precise main The packages provided by this source would be made available via the ``precise`` release, therefore ``fromrepo`` would need to be set to ``precise`` for Salt to install the package from this source. 
Having multiple sources in the same release may result in the default install candidate being newer than what is desired. If this is the case, the desired version must be specified using the ``version`` parameter. If the ``pkgs`` parameter is being used to install multiple packages in the same state, then instead of using ``version``, use the method of version specification described in the **Multiple Package Installation Options** section below. Running the shell command ``apt-cache policy pkgname`` on a minion can help elucidate the APT configuration and aid in properly configuring states: .. code-block:: bash root@saltmaster:~# salt ubuntu01 cmd.run 'apt-cache policy ffmpeg' ubuntu01: ffmpeg: Installed: (none) Candidate: 7:0.10.11-1~precise1 Version table: 7:0.10.11-1~precise1 0 500 http://ppa.launchpad.net/jon-severinsson/ffmpeg/ubuntu/ precise/main amd64 Packages 4:0.8.10-0ubuntu0.12.04.1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise-updates/main amd64 Packages 500 http://security.ubuntu.com/ubuntu/ precise-security/main amd64 Packages 4:0.8.1-0ubuntu1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise/main amd64 Packages The release is located directly after the source's URL. The actual release name is the part before the slash, so to install version **4:0.8.10-0ubuntu0.12.04.1** either ``precise-updates`` or ``precise-security`` could be used for the ``fromrepo`` value. :param bool skip_verify: Skip the GPG verification check for the package to be installed :param bool skip_suggestions: Force strict package naming. Disables lookup of package alternatives. .. versionadded:: 2014.1.1 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 :param bool allow_updates: Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). 
This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - allow_updates: True - hold: False :param bool pkg_verify: .. versionadded:: 2014.7.0 For requested packages that are already installed and would not be targeted for upgrade or downgrade, use pkg.verify to determine if any of the files installed by the package have been altered. If files have been altered, the reinstall option of pkg.install is used to force a reinstall. Types to ignore can be passed to pkg.verify. Additionally, ``verify_options`` can be used to modify further the behavior of pkg.verify. See examples below. Currently, this option is supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>`. Examples: .. code-block:: yaml httpd: pkg.installed: - version: 2.2.15-30.el6.centos - pkg_verify: True .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc - verify_options: - nodeps - nofiledigest :param list ignore_types: List of types to ignore when verifying the package .. versionadded:: 2014.7.0 :param list verify_options: List of additional options to pass when verifying the package. These options will be added to the ``rpm -V`` command, prepended with ``--`` (for example, when ``nodeps`` is passed in this option, ``rpm -V`` will be run with ``--nodeps``). .. versionadded:: 2016.11.0 :param bool normalize: Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml gpfs.gplbin-2.6.32-279.31.1.el6.x86_64: pkg.installed: - normalize: False :param bool ignore_epoch: When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.installed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the package would be installed, but the state would report as failed because the actual installed version would be ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 | **MULTIPLE PACKAGE INSTALLATION OPTIONS: (not supported in pkgng)** :param list pkgs: A list of packages to install from a software repository. All packages listed under ``pkgs`` will be installed via a single command. .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar - baz - hold: True ``NOTE:`` For :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`winrepo <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`, version numbers can be specified in the ``pkgs`` argument. For example: .. 
code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz Additionally, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`zypper <salt.modules.zypper>`, :mod:`yum/dnf <salt.modules.yumpkg>`, and :mod:`apt <salt.modules.aptpkg>` support the ``<``, ``<=``, ``>=``, and ``>`` operators for more control over what versions will be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: '>=1.2.3-4' - baz ``NOTE:`` When using comparison operators, the expression must be enclosed in quotes to avoid a YAML render error. With :mod:`ebuild <salt.modules.ebuild>` is also possible to specify a use flag list and/or if the given packages should be in package.accept_keywords file and/or the overlay from which you want the package to be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo: '~' - bar: '~>=1.2:slot::overlay[use,-otheruse]' - baz :param list sources: A list of packages to install, along with the source URI or local path from which to install each package. In the example below, ``foo``, ``bar``, ``baz``, etc. refer to the name of the package, as it would appear in the output of the ``pkg.version`` or ``pkg.list_pkgs`` salt CLI commands. .. code-block:: yaml mypkgs: pkg.installed: - sources: - foo: salt://rpms/foo.rpm - bar: http://somesite.org/bar.rpm - baz: ftp://someothersite.org/baz.rpm - qux: /minion/path/to/qux.rpm **PLATFORM-SPECIFIC ARGUMENTS** These are specific to each OS. If it does not apply to the execution module for your OS, it is ignored. :param bool hold: Force the package to be held at the current installed version. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2014.7.0 :param bool update_holds: If ``True``, and this function would update the package version, any packages which are being held will be temporarily unheld so that they can be updated. 
Otherwise, if this function attempts to update a held package, the held package(s) will be skipped and the state will fail. By default, this parameter is set to ``False``. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2016.11.0 :param list names: A list of packages to install from a software repository. Each package will be installed individually by the package manager. .. warning:: Unlike ``pkgs``, the ``names`` parameter cannot specify a version. In addition, it makes a separate call to the package management frontend to install each package, whereas ``pkgs`` makes just a single call. It is therefore recommended to use ``pkgs`` instead of ``names`` to install multiple packages, both for the additional features and the performance improvement that it brings. :param bool install_recommends: Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - install_recommends: False :param bool only_upgrade: Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. :param bool report_reboot_exit_codes: If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. 
code-block:: yaml ms vcpp installed: pkg.installed: - name: ms-vcpp - version: 10.0.40219 - report_reboot_exit_codes: False :param str bypass_file: If you wish to bypass the full package validation process, you can specify a file related to the installed package as a way to validate the package has already been installed. A good example would be a config file that is deployed with the package. Another bypass_file could be ``/run/salt-minion.pid``. .. code-block:: yaml install_ntp: pkg.installed: - name: ntp - bypass_file: /etc/ntp.conf The use case for this feature is when running salt at significant scale. Each state that has a requisite for a ``pkg.installed`` will have salt querying the package manager of the system. Compared to simple diff checks, querying the package manager is a lengthy process. This feature is an attempt to reduce the run time of states. If only a config change is being made but you wish to keep all of the self resolving requisites this bypasses the lengthy cost of the package manager. The assumption is that if this file is present, the package should already be installed. :param str bypass_file_contains: This option can only be used in conjunction with the ``bypass_file`` option. It is to provide a second layer of validation before bypassing the ``pkg.installed`` process. .. code-block:: yaml install_ntp: pkg.installed: - name: ntp - bypass_file: /etc/ntp.conf - bypass_file_contains: version-20181218 This will have salt check to see if the file contains the specified string. If the value is found, the ``pkg.installed`` process will be bypassed under the assumption that two pieces of validation have passed and the package is already installed. .. warning:: Do not try and use ``{{ salt['pkg.version']('ntp') }}`` in a jinja template as part of your bypass_file_contains match. This will trigger a ``pkg.version`` lookup with the package manager and negate any time saved by trying to use the bypass feature. 
:return: A dictionary containing the state of the software installation :rtype dict: .. note:: The ``pkg.installed`` state supports the usage of ``reload_modules``. This functionality allows you to force Salt to reload all modules. In many cases, Salt is clever enough to transparently reload the modules. For example, if you install a package, Salt reloads modules because some other module or state might require the package which was installed. However, there are some edge cases where this may not be the case, which is what ``reload_modules`` is meant to resolve. You should only use ``reload_modules`` if your ``pkg.installed`` does some sort of installation where if you do not reload the modules future items in your state which rely on the software being installed will fail. Please see the :ref:`Reloading Modules <reloading-modules>` documentation for more information. ''' if not pkgs and isinstance(pkgs, list): return {'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided'} # If just a name (and optionally a version) is passed, just pack them into # the pkgs argument. 
if name and not any((pkgs, sources)): if version: pkgs = [{name: version}] version = None else: pkgs = [name] kwargs['saltenv'] = __env__ refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if bypass_file is not None and bypass_file_contains is not None: if os.path.isfile(bypass_file): with salt.utils.fopen(bypass_file) as bypass_file_open: if bypass_file_contains in bypass_file_open.read(): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as {} was present in {}'.format(bypass_file_contains, bypass_file)} if bypass_file is not None and bypass_file_contains is None: if os.path.isfile(bypass_file): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as bypass_file {} was present'.format(bypass_file)} # check if capabilities should be checked and modify the requested packages # accordingly. if pkgs: pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) if not isinstance(pkg_verify, list): pkg_verify = pkg_verify is True if (pkg_verify or isinstance(pkg_verify, list)) \ and 'pkg.verify' not in __salt__: return {'name': name, 'changes': {}, 'result': False, 'comment': 'pkg.verify not implemented'} if not isinstance(version, six.string_types) and version is not None: version = six.text_type(version) kwargs['allow_updates'] = allow_updates result = _find_install_targets(name, version, pkgs, sources, fromrepo=fromrepo, skip_suggestions=skip_suggestions, pkg_verify=pkg_verify, normalize=normalize, ignore_epoch=ignore_epoch, reinstall=reinstall, refresh=refresh, **kwargs) try: (desired, targets, to_unpurge, to_reinstall, altered_files, warnings, was_refreshed) = result if was_refreshed: refresh = False except ValueError: # _find_install_targets() found no targets or encountered an error # check that the hold function is available if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( 
name=name, pkgs=pkgs, sources=sources ) except (CommandExecutionError, SaltInvocationError) as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': six.text_type(exc)} if 'result' in hold_ret and not hold_ret['result']: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] for i in modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] result['changes'][i['name']] = i['changes'] for i in not_modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] for i in failed_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] return result if to_unpurge and 'lowpkg.unpurge' not in __salt__: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'lowpkg.unpurge not implemented'} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret # Remove any targets not returned by _find_install_targets if pkgs: pkgs = [dict([(x, y)]) for x, y in six.iteritems(targets)] pkgs.extend([dict([(x, y)]) for x, y in six.iteritems(to_reinstall)]) elif sources: oldsources = sources sources = [x for x in oldsources if next(iter(list(x.keys()))) in targets] sources.extend([x for x in oldsources if next(iter(list(x.keys()))) in to_reinstall]) comment = [] if __opts__['test']: if targets: if sources: summary = ', '.join(targets) else: summary = ', '.join([_get_desired_pkg(x, targets) for x in targets]) comment.append('The following packages would be ' 'installed/updated: {0}'.format(summary)) if to_unpurge: comment.append( 'The following packages would have their selection 
status ' 'changed from \'purge\' to \'install\': {0}' .format(', '.join(to_unpurge)) ) if to_reinstall: # Add a comment for each package in to_reinstall with its # pkg.verify output if reinstall: reinstall_targets = [] for reinstall_pkg in to_reinstall: if sources: reinstall_targets.append(reinstall_pkg) else: reinstall_targets.append( _get_desired_pkg(reinstall_pkg, to_reinstall) ) msg = 'The following packages would be reinstalled: ' msg += ', '.join(reinstall_targets) comment.append(msg) else: for reinstall_pkg in to_reinstall: if sources: pkgstr = reinstall_pkg else: pkgstr = _get_desired_pkg(reinstall_pkg, to_reinstall) comment.append( 'Package \'{0}\' would be reinstalled because the ' 'following files have been altered:'.format(pkgstr) ) comment.append( _nested_output(altered_files[reinstall_pkg]) ) ret = {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret changes = {'installed': {}} modified_hold = None not_modified_hold = None failed_hold = None if targets or to_reinstall: try: pkg_ret = __salt__['pkg.install'](name=None, refresh=refresh, version=version, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=pkgs, sources=sources, reinstall=bool(to_reinstall), normalize=normalize, update_holds=update_holds, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while installing ' 'package(s): {0}'.format(exc)) if warnings: ret.setdefault('warnings', []).extend(warnings) return ret if refresh: refresh = False if isinstance(pkg_ret, dict): changes['installed'].update(pkg_ret) elif isinstance(pkg_ret, six.string_types): comment.append(pkg_ret) # Code below will be looking for a dictionary. If this is a string # it means that there was an exception raised and that no packages # changed, so now that we have added this error to the comments we # set this to an empty dictionary so that the code below which # checks reinstall targets works. pkg_ret = {} if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( name=name, pkgs=desired ) except (CommandExecutionError, SaltInvocationError) as exc: comment.append(six.text_type(exc)) ret = {'name': name, 'changes': changes, 'result': False, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: if 'result' in hold_ret and not hold_ret['result']: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] if to_unpurge: changes['purge_desired'] = __salt__['lowpkg.unpurge'](*to_unpurge) # Analyze pkg.install results for packages in targets if sources: modified = [x for x in changes['installed'] if x in targets] not_modified = [x for x in desired if x not in targets 
and x not in to_reinstall] failed = [x for x in targets if x not in modified] else: if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True new_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__: new_caps = __salt__['pkg.list_provides'](**kwargs) else: new_caps = {} ok, failed = _verify_install(desired, new_pkgs, ignore_epoch=ignore_epoch, new_caps=new_caps) modified = [x for x in ok if x in targets] not_modified = [x for x in ok if x not in targets and x not in to_reinstall] failed = [x for x in failed if x in targets] # If there was nothing unpurged, just set the changes dict to the contents # of changes['installed']. if not changes.get('purge_desired'): changes = changes['installed'] if modified: if sources: summary = ', '.join(modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in modified]) if len(summary) < 20: comment.append('The following packages were installed/updated: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} installed/updated.'.format( len(modified), 's' if len(modified) > 1 else '', 'were' if len(modified) > 1 else 'was' ) ) if modified_hold: for i in modified_hold: change_name = i['name'] if change_name in changes: comment.append(i['comment']) if changes[change_name]['new']: changes[change_name]['new'] += '\n' changes[change_name]['new'] += '{0}'.format(i['changes']['new']) if changes[change_name]['old']: changes[change_name]['old'] += '\n' changes[change_name]['old'] += '{0}'.format(i['changes']['old']) else: comment.append(i['comment']) changes[change_name] = {} changes[change_name]['new'] = '{0}'.format(i['changes']['new']) # Any requested packages that were not targeted for install or reinstall if not_modified: if sources: summary = ', '.join(not_modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in not_modified]) if len(not_modified) <= 20: comment.append('The following 
packages were already installed: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} already installed'.format( len(not_modified), 's' if len(not_modified) > 1 else '', 'were' if len(not_modified) > 1 else 'was' ) ) if not_modified_hold: for i in not_modified_hold: comment.append(i['comment']) result = True if failed: if sources: summary = ', '.join(failed) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in failed]) comment.insert(0, 'The following packages failed to ' 'install/update: {0}'.format(summary)) result = False if failed_hold: for i in failed_hold: comment.append(i['comment']) result = False # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] # Rerun pkg.verify for packages in to_reinstall to determine failed modified = [] failed = [] for reinstall_pkg in to_reinstall: if reinstall: if reinstall_pkg in pkg_ret: modified.append(reinstall_pkg) else: failed.append(reinstall_pkg) elif pkg_verify: # No need to wrap this in a try/except because we would already # have caught invalid arguments earlier. 
            # Re-verify each reinstalled package; any remaining discrepancies
            # mean the reinstall did not remediate the altered files.
            verify_result = __salt__['pkg.verify'](
                reinstall_pkg,
                ignore_types=ignore_types,
                verify_options=verify_options,
                **kwargs)
            if verify_result:
                failed.append(reinstall_pkg)
                altered_files[reinstall_pkg] = verify_result
            else:
                modified.append(reinstall_pkg)

    if modified:
        # Add a comment for each package in modified with its pkg.verify output
        for modified_pkg in modified:
            if sources:
                pkgstr = modified_pkg
            else:
                pkgstr = _get_desired_pkg(modified_pkg, desired)
            msg = 'Package {0} was reinstalled.'.format(pkgstr)
            if modified_pkg in altered_files:
                msg += ' The following files were remediated:'
                comment.append(msg)
                comment.append(_nested_output(altered_files[modified_pkg]))
            else:
                comment.append(msg)

    if failed:
        # Add a comment for each package in failed with its pkg.verify output
        for failed_pkg in failed:
            if sources:
                pkgstr = failed_pkg
            else:
                pkgstr = _get_desired_pkg(failed_pkg, desired)
            msg = ('Reinstall was not successful for package {0}.'
                   .format(pkgstr))
            if failed_pkg in altered_files:
                msg += ' The following files could not be remediated:'
                comment.append(msg)
                comment.append(_nested_output(altered_files[failed_pkg]))
            else:
                comment.append(msg)
        result = False

    # Assemble the standard state return dict; warnings collected earlier
    # (e.g. from _find_install_targets) are attached without clobbering.
    ret = {'name': name,
           'changes': changes,
           'result': result,
           'comment': '\n'.join(comment)}
    if warnings:
        ret.setdefault('warnings', []).extend(warnings)
    return ret


def downloaded(name,
               version=None,
               pkgs=None,
               fromrepo=None,
               ignore_epoch=None,
               **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that the package is downloaded, and that it is the correct version
    (if specified).

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    :param str name:
        The name of the package to be downloaded. This parameter is ignored if
        either "pkgs" is used. Additionally, please note that this option can
        only be used to download packages from a software repository.

    :param str version:
        Download a specific version of a package.

        .. important::
            As of version 2015.8.7, for distros which use yum/dnf, packages
            which have a version with a nonzero epoch (that is, versions which
            start with a number followed by a colon) must have the epoch
            included when specifying the version number. For example:

            .. code-block:: yaml

                vim-enhanced:
                  pkg.downloaded:
                    - version: 2:7.4.160-1.el7

            An **ignore_epoch** argument has been added to which causes the
            epoch to be disregarded when the state checks to see if the
            desired version was installed.

            You can install a specific version when using the ``pkgs``
            argument by including the version after the package:

            .. code-block:: yaml

                common_packages:
                  pkg.downloaded:
                    - pkgs:
                      - unzip
                      - dos2unix
                      - salt-minion: 2015.8.5-1.el6

    :param bool resolve_capabilities:
        Turn on resolving capabilities. This allows one to name "provides" or
        alias names for packages.

        .. versionadded:: 2018.3.0

    CLI Example:

    .. code-block:: yaml

        zsh:
          pkg.downloaded:
            - version: 5.0.5-4.63
            - fromrepo: "myrepository"
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # Downloading without installing requires provider support.
    if 'pkg.list_downloaded' not in __salt__:
        ret['result'] = False
        ret['comment'] = 'The pkg.downloaded state is not available on ' \
                         'this platform'
        return ret

    # An explicitly-passed empty list means there is nothing to do.
    if not pkgs and isinstance(pkgs, list):
        ret['result'] = True
        ret['comment'] = 'No packages to download provided'
        return ret

    # If just a name (and optionally a version) is passed, just pack them into
    # the pkgs argument.
    if name and not pkgs:
        if version:
            pkgs = [{name: version}]
            version = None
        else:
            pkgs = [name]

    # It doesn't make sense here to receive 'downloadonly' as kwargs
    # as we're explicitly passing 'downloadonly=True' to execution module.
    if 'downloadonly' in kwargs:
        del kwargs['downloadonly']

    pkgs, _refresh = _resolve_capabilities(pkgs, **kwargs)

    # Only downloading not yet downloaded packages
    targets = _find_download_targets(name,
                                     version,
                                     pkgs,
                                     fromrepo=fromrepo,
                                     ignore_epoch=ignore_epoch,
                                     **kwargs)
    # A dict with a 'result' key is a complete state return from the helper;
    # any other non-dict value indicates an error while computing targets.
    if isinstance(targets, dict) and 'result' in targets:
        return targets
    elif not isinstance(targets, dict):
        ret['result'] = False
        ret['comment'] = 'An error was encountered while checking targets: ' \
                         '{0}'.format(targets)
        return ret

    if __opts__['test']:
        summary = ', '.join(targets)
        ret['comment'] = 'The following packages would be ' \
                         'downloaded: {0}'.format(summary)
        return ret

    try:
        pkg_ret = __salt__['pkg.install'](name=name,
                                          pkgs=pkgs,
                                          version=version,
                                          downloadonly=True,
                                          fromrepo=fromrepo,
                                          ignore_epoch=ignore_epoch,
                                          **kwargs)
        ret['result'] = True
        ret['changes'].update(pkg_ret)
    except CommandExecutionError as exc:
        ret = {'name': name, 'result': False}
        if exc.info:
            # Get information for state return from the exception.
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = 'An error was encountered while downloading ' \
                             'package(s): {0}'.format(exc)
        return ret

    # Confirm that every target actually made it into the download cache.
    new_pkgs = __salt__['pkg.list_downloaded'](**kwargs)
    ok, failed = _verify_install(targets, new_pkgs, ignore_epoch=ignore_epoch)

    if failed:
        summary = ', '.join([_get_desired_pkg(x, targets)
                             for x in failed])
        ret['result'] = False
        ret['comment'] = 'The following packages failed to ' \
                         'download: {0}'.format(summary)

    if not ret['changes'] and not ret['comment']:
        ret['result'] = True
        ret['comment'] = 'Packages are already downloaded: ' \
                         '{0}'.format(', '.join(targets))

    return ret


def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that packages related to certain advisory ids are installed.

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    CLI Example:

    .. code-block:: yaml

        issue-foo-fixed:
          pkg.patch_installed:
            - advisory_ids:
              - SUSE-SLE-SERVER-12-SP2-2017-185
              - SUSE-SLE-SERVER-12-SP2-2017-150
              - SUSE-SLE-SERVER-12-SP2-2017-120
    '''
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': ''}

    # Advisory/patch handling requires provider support.
    if 'pkg.list_patches' not in __salt__:
        ret['result'] = False
        ret['comment'] = 'The pkg.patch_installed state is not available on ' \
                         'this platform'
        return ret

    # An explicitly-passed empty list means there is nothing to do.
    if not advisory_ids and isinstance(advisory_ids, list):
        ret['result'] = True
        ret['comment'] = 'No advisory ids provided'
        return ret

    # Only downloading not yet downloaded packages
    targets = _find_advisory_targets(name, advisory_ids, **kwargs)
    # A dict with a 'result' key is a complete state return from the helper;
    # any other non-list value indicates an error while computing targets.
    if isinstance(targets, dict) and 'result' in targets:
        return targets
    elif not isinstance(targets, list):
        ret['result'] = False
        ret['comment'] = 'An error was encountered while checking targets: ' \
                         '{0}'.format(targets)
        return ret

    if __opts__['test']:
        summary = ', '.join(targets)
        ret['comment'] = 'The following advisory patches would be ' \
                         'downloaded: {0}'.format(summary)
        return ret

    try:
        pkg_ret = __salt__['pkg.install'](name=name,
                                          advisory_ids=advisory_ids,
                                          downloadonly=downloadonly,
                                          **kwargs)
        ret['result'] = True
        ret['changes'].update(pkg_ret)
    except CommandExecutionError as exc:
        ret = {'name': name, 'result': False}
        if exc.info:
            # Get information for state return from the exception.
            ret['changes'] = exc.info.get('changes', {})
            ret['comment'] = exc.strerror_without_changes
        else:
            ret['changes'] = {}
            ret['comment'] = ('An error was encountered while downloading '
                              'package(s): {0}'.format(exc))
        return ret

    if not ret['changes'] and not ret['comment']:
        status = 'downloaded' if downloadonly else 'installed'
        ret['result'] = True
        ret['comment'] = ('Advisory patch is not needed or related packages '
                          'are already {0}'.format(status))

    return ret


def patch_downloaded(name, advisory_ids=None, **kwargs):
    '''
    .. versionadded:: 2017.7.0

    Ensure that packages related to certain advisory ids are downloaded.

    Currently supported for the following pkg providers:
    :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>`

    CLI Example:

    .. code-block:: yaml

        preparing-to-fix-issues:
          pkg.patch_downloaded:
            - advisory_ids:
              - SUSE-SLE-SERVER-12-SP2-2017-185
              - SUSE-SLE-SERVER-12-SP2-2017-150
              - SUSE-SLE-SERVER-12-SP2-2017-120
    '''
    if 'pkg.list_patches' not in __salt__:
        return {'name': name,
                'result': False,
                'changes': {},
                'comment': 'The pkg.patch_downloaded state is not available on '
                           'this platform'}

    # It doesn't make sense here to receive 'downloadonly' as kwargs
    # as we're explicitly passing 'downloadonly=True' to execution module.
    if 'downloadonly' in kwargs:
        del kwargs['downloadonly']
    # Delegate to patch_installed in download-only mode.
    return patch_installed(name=name, advisory_ids=advisory_ids, downloadonly=True, **kwargs)


def latest(
        name,
        refresh=None,
        fromrepo=None,
        skip_verify=False,
        pkgs=None,
        watch_flags=True,
        **kwargs):
    '''
    Ensure that the named package is installed and the latest available
    package. If the package can be updated, this state function will update
    the package. Generally it is better for the
    :mod:`installed <salt.states.pkg.installed>` function to be used, as
    :mod:`latest <salt.states.pkg.latest>` will update the package whenever a
    new package is available.

    name
        The name of the package to maintain at the latest available version.
        This parameter is ignored if "pkgs" is used.
fromrepo Specify a repository from which to install skip_verify Skip the GPG verification check for the package to be installed refresh This parameter controls whether or not the package repo database is updated prior to checking for the latest available version of the requested packages. If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before checking for the latest available version of the requested packages. If ``False``, the package database will *not* be refreshed before checking. If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.latest: - refresh: True - cache_valid_time: 300 In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 Multiple Package Installation Options: (Not yet supported for: FreeBSD, OpenBSD, MacOS, and Solaris pkgutil) pkgs A list of packages to maintain at the latest available version. .. 
code-block:: yaml mypkgs: pkg.latest: - pkgs: - foo - bar - baz install_recommends Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - install_recommends: False only_upgrade Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. report_reboot_exit_codes If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. code-block:: yaml ms vcpp installed: pkg.latest: - name: ms-vcpp - report_reboot_exit_codes: False ''' refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if kwargs.get('sources'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'The "sources" parameter is not supported.'} elif pkgs: desired_pkgs = list(_repack_pkgs(pkgs).keys()) if not desired_pkgs: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted "pkgs" parameter. See ' 'minion log.'} else: if not pkgs and isinstance(pkgs, list): return { 'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided' } else: desired_pkgs = [name] kwargs['saltenv'] = __env__ # check if capabilities should be checked and modify the requested packages # accordingly. 
desired_pkgs, refresh = _resolve_capabilities(desired_pkgs, refresh=refresh, **kwargs) try: avail = __salt__['pkg.latest_version'](*desired_pkgs, fromrepo=fromrepo, refresh=refresh, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking the ' 'newest available version of package(s): {0}' .format(exc)} try: cur = __salt__['pkg.version'](*desired_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': exc.strerror} # Repack the cur/avail data if only a single package is being checked if isinstance(cur, six.string_types): cur = {desired_pkgs[0]: cur} if isinstance(avail, six.string_types): avail = {desired_pkgs[0]: avail} targets = {} problems = [] for pkg in desired_pkgs: if not avail.get(pkg): # Package either a) is up-to-date, or b) does not exist if not cur.get(pkg): # Package does not exist msg = 'No information found for \'{0}\'.'.format(pkg) log.error(msg) problems.append(msg) elif watch_flags \ and __grains__.get('os_family') == 'Gentoo' \ and __salt__['portage_config.is_changed_uses'](pkg): # Package is up-to-date, but Gentoo USE flags are changing so # we need to add it to the targets targets[pkg] = cur[pkg] else: # Package either a) is not installed, or b) is installed and has an # upgrade available targets[pkg] = avail[pkg] if problems: return { 'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems) } if targets: # Find up-to-date packages if not pkgs: # There couldn't have been any up-to-date packages if this state # only targeted a single package and is being allowed to proceed to # the install step. 
up_to_date = [] else: up_to_date = [x for x in pkgs if x not in targets] if __opts__['test']: comments = [] comments.append( 'The following packages would be installed/upgraded: ' + ', '.join(sorted(targets)) ) if up_to_date: up_to_date_count = len(up_to_date) if up_to_date_count <= 10: comments.append( 'The following packages are already up-to-date: ' + ', '.join( ['{0} ({1})'.format(x, cur[x]) for x in sorted(up_to_date)] ) ) else: comments.append( '{0} packages are already up-to-date' .format(up_to_date_count) ) return {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comments)} if salt.utils.platform.is_windows(): # pkg.install execution module on windows ensures the software # package is installed when no version is specified, it does not # upgrade the software to the latest. This is per the design. # Build updated list of pkgs *with verion number*, exclude # non-targeted ones targeted_pkgs = [{x: targets[x]} for x in targets] else: # Build updated list of pkgs to exclude non-targeted ones targeted_pkgs = list(targets) # No need to refresh, if a refresh was necessary it would have been # performed above when pkg.latest_version was run. 
try: changes = __salt__['pkg.install'](name=None, refresh=False, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=targeted_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while installing ' 'package(s): {0}'.format(exc)} if changes: # Find failed and successful updates failed = [x for x in targets if not changes.get(x) or changes[x].get('new') != targets[x] and targets[x] != 'latest'] successful = [x for x in targets if x not in failed] comments = [] if failed: msg = 'The following packages failed to update: ' \ '{0}'.format(', '.join(sorted(failed))) comments.append(msg) if successful: msg = 'The following packages were successfully ' \ 'installed/upgraded: ' \ '{0}'.format(', '.join(sorted(successful))) comments.append(msg) if up_to_date: if len(up_to_date) <= 10: msg = 'The following packages were already up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: msg = '{0} packages were already up-to-date '.format( len(up_to_date)) comments.append(msg) return {'name': name, 'changes': changes, 'result': False if failed else True, 'comment': ' '.join(comments)} else: if len(targets) > 10: comment = ('{0} targeted packages failed to update. ' 'See debug log for details.'.format(len(targets))) elif len(targets) > 1: comment = ('The following targeted packages failed to update. ' 'See debug log for details: ({0}).' 
.format(', '.join(sorted(targets)))) else: comment = 'Package {0} failed to ' \ 'update.'.format(next(iter(list(targets.keys())))) if up_to_date: if len(up_to_date) <= 10: comment += ' The following packages were already ' \ 'up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: comment += '{0} packages were already ' \ 'up-to-date'.format(len(up_to_date)) return {'name': name, 'changes': changes, 'result': False, 'comment': comment} else: if len(desired_pkgs) > 10: comment = 'All {0} packages are up-to-date.'.format( len(desired_pkgs)) elif len(desired_pkgs) > 1: comment = 'All packages are up-to-date ' \ '({0}).'.format(', '.join(sorted(desired_pkgs))) else: comment = 'Package {0} is already ' \ 'up-to-date'.format(desired_pkgs[0]) return {'name': name, 'changes': {}, 'result': True, 'comment': comment} def _uninstall( action='remove', name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Common function for package removal ''' if action not in ('remove', 'purge'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalid action \'{0}\'. 
' 'This is probably a bug.'.format(action)} try: pkg_params = __salt__['pkg_resource.parse_targets']( name, pkgs, normalize=normalize)[0] except MinionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while parsing targets: ' '{0}'.format(exc)} targets = _find_remove_targets(name, version, pkgs, normalize, ignore_epoch=ignore_epoch, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, list): return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking targets: ' '{0}'.format(targets)} if action == 'purge': old_removed = __salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) targets.extend([x for x in pkg_params if x in old_removed]) targets.sort() if not targets: return {'name': name, 'changes': {}, 'result': True, 'comment': 'None of the targeted packages are installed' '{0}'.format(' or partially installed' if action == 'purge' else '')} if __opts__['test']: return {'name': name, 'changes': {}, 'result': None, 'comment': 'The following packages will be {0}d: ' '{1}.'.format(action, ', '.join(targets))} changes = __salt__['pkg.{0}'.format(action)](name, pkgs=pkgs, version=version, **kwargs) new = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) failed = [] for x in pkg_params: if __grains__['os_family'] in ['Suse', 'RedHat']: # Check if the package version set to be removed is actually removed: if x in new and not pkg_params[x]: failed.append(x) elif x in new and pkg_params[x] in new[x]: failed.append(x + "-" + pkg_params[x]) elif x in new: failed.append(x) if action == 'purge': new_removed = __salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) failed.extend([x for x in pkg_params if x in new_removed]) failed.sort() if failed: return {'name': name, 'changes': changes, 'result': False, 'comment': 'The following packages failed to {0}: ' '{1}.'.format(action, ', 
'.join(failed))} comments = [] not_installed = sorted([x for x in pkg_params if x not in targets]) if not_installed: comments.append('The following packages were not installed: ' '{0}'.format(', '.join(not_installed))) comments.append('The following packages were {0}d: ' '{1}.'.format(action, ', '.join(targets))) else: comments.append('All targeted packages were {0}d.'.format(action)) return {'name': name, 'changes': changes, 'result': True, 'comment': ' '.join(comments)} def removed(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.remove`` if necessary to remove the package. name The name of the package to be removed. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.removed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to remove. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='remove', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while removing ' 'package(s): {0}'.format(exc)) return ret def purged(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.purge`` if necessary to purge the package. 
All configuration files are also removed. name The name of the package to be purged. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. 
code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.purged: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to purge. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='purge', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while purging ' 'package(s): {0}'.format(exc)) return ret def uptodate(name, refresh=False, pkgs=None, **kwargs): ''' .. versionadded:: 2014.7.0 .. versionchanged:: 2018.3.0 Added support for the ``pkgin`` provider. Verify that the system is completely up to date. name The name has no functional value and is only used as a tracking reference refresh refresh the package database before checking for new upgrades pkgs list of packages to upgrade :param str cache_valid_time: This parameter sets the value in seconds after which cache marked as invalid, and cache update is necessary. This overwrite ``refresh`` parameter default behavior. In this case cache_valid_time is set, refresh will not take place for amount in seconds since last ``apt-get update`` executed on the system. .. 
note:: This parameter available only on Debian based distributions, and have no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 kwargs Any keyword arguments to pass through to ``pkg.upgrade``. .. versionadded:: 2015.5.0 ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'Failed to update'} if 'pkg.list_upgrades' not in __salt__: ret['comment'] = 'State pkg.uptodate is not available' return ret # emerge --update doesn't appear to support repo notation if 'fromrepo' in kwargs and __grains__['os_family'] == 'Gentoo': ret['comment'] = '\'fromrepo\' argument not supported on this platform' return ret if isinstance(refresh, bool): pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) try: packages = __salt__['pkg.list_upgrades'](refresh=refresh, **kwargs) expected = {pkgname: {'new': pkgver, 'old': __salt__['pkg.version'](pkgname, **kwargs)} for pkgname, pkgver in six.iteritems(packages)} if isinstance(pkgs, list): packages = [pkg for pkg in packages if pkg in pkgs] expected = {pkgname: pkgver for pkgname, pkgver in six.iteritems(expected) if pkgname in pkgs} except Exception as exc: ret['comment'] = six.text_type(exc) return ret else: ret['comment'] = 'refresh must be either True or False' return ret if not packages: ret['comment'] = 'System is already up-to-date' ret['result'] = True return ret elif __opts__['test']: ret['comment'] = 'System update will be performed' ret['changes'] = expected ret['result'] = None return ret try: ret['changes'] = __salt__['pkg.upgrade'](refresh=refresh, pkgs=pkgs, **kwargs) except CommandExecutionError as exc: if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while updating ' 'packages: {0}'.format(exc)) return ret # If a package list was provided, ensure those packages were updated missing = [] if isinstance(pkgs, list): missing = [pkg for pkg in six.iterkeys(expected) if pkg not in ret['changes']] if missing: ret['comment'] = 'The following package(s) failed to update: {0}'.format(', '.join(missing)) ret['result'] = False else: ret['comment'] = 'Upgrade ran successfully' ret['result'] = True return ret def mod_init(low): ''' Set a flag to tell the install functions to refresh the package database. This ensures that the package database is refreshed only once during a state run significantly improving the speed of package management during a state run. It sets a flag for a number of reasons, primarily due to timeline logic. When originally setting up the mod_init for pkg a number of corner cases arose with different package managers and how they refresh package data. It also runs the "ex_mod_init" from the package manager module that is currently loaded. The "ex_mod_init" is expected to work as a normal "mod_init" function. .. 
seealso:: :py:func:`salt.modules.ebuild.ex_mod_init` ''' ret = True if 'pkg.ex_mod_init' in __salt__: ret = __salt__['pkg.ex_mod_init'](low) if low['fun'] == 'installed' or low['fun'] == 'latest': salt.utils.pkg.write_rtag(__opts__) return ret return False def mod_aggregate(low, chunks, running): ''' The mod_aggregate function which looks up all packages in the available low chunks and merges them into a single pkgs ref in the present low data ''' pkgs = [] pkg_type = None agg_enabled = [ 'installed', 'latest', 'removed', 'purged', ] if low.get('fun') not in agg_enabled: return low for chunk in chunks: tag = __utils__['state.gen_tag'](chunk) if tag in running: # Already ran the pkg state, skip aggregation continue if chunk.get('state') == 'pkg': if '__agg__' in chunk: continue # Check for the same function if chunk.get('fun') != low.get('fun'): continue # Check for the same repo if chunk.get('fromrepo') != low.get('fromrepo'): continue # Check first if 'sources' was passed so we don't aggregate pkgs # and sources together. if 'sources' in chunk: if pkg_type is None: pkg_type = 'sources' if pkg_type == 'sources': pkgs.extend(chunk['sources']) chunk['__agg__'] = True else: if pkg_type is None: pkg_type = 'pkgs' if pkg_type == 'pkgs': # Pull out the pkg names! if 'pkgs' in chunk: pkgs.extend(chunk['pkgs']) chunk['__agg__'] = True elif 'name' in chunk: version = chunk.pop('version', None) if version is not None: pkgs.append({chunk['name']: version}) else: pkgs.append(chunk['name']) chunk['__agg__'] = True if pkg_type is not None and pkgs: if pkg_type in low: low[pkg_type].extend(pkgs) else: low[pkg_type] = pkgs return low def mod_watch(name, **kwargs): ''' Install/reinstall a package based on a watch requisite .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered. 
''' sfun = kwargs.pop('sfun', None) mapfun = {'purged': purged, 'latest': latest, 'removed': removed, 'installed': installed} if sfun in mapfun: return mapfun[sfun](name, **kwargs) return {'name': name, 'changes': {}, 'comment': 'pkg.{0} does not work with the watch requisite'.format(sfun), 'result': False}
saltstack/salt
salt/states/pkg.py
mod_init
python
def mod_init(low): ''' Set a flag to tell the install functions to refresh the package database. This ensures that the package database is refreshed only once during a state run significantly improving the speed of package management during a state run. It sets a flag for a number of reasons, primarily due to timeline logic. When originally setting up the mod_init for pkg a number of corner cases arose with different package managers and how they refresh package data. It also runs the "ex_mod_init" from the package manager module that is currently loaded. The "ex_mod_init" is expected to work as a normal "mod_init" function. .. seealso:: :py:func:`salt.modules.ebuild.ex_mod_init` ''' ret = True if 'pkg.ex_mod_init' in __salt__: ret = __salt__['pkg.ex_mod_init'](low) if low['fun'] == 'installed' or low['fun'] == 'latest': salt.utils.pkg.write_rtag(__opts__) return ret return False
Set a flag to tell the install functions to refresh the package database. This ensures that the package database is refreshed only once during a state run significantly improving the speed of package management during a state run. It sets a flag for a number of reasons, primarily due to timeline logic. When originally setting up the mod_init for pkg a number of corner cases arose with different package managers and how they refresh package data. It also runs the "ex_mod_init" from the package manager module that is currently loaded. The "ex_mod_init" is expected to work as a normal "mod_init" function. .. seealso:: :py:func:`salt.modules.ebuild.ex_mod_init`
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pkg.py#L3240-L3266
[ "def write_rtag(opts):\n '''\n Write the rtag file\n '''\n rtag_file = rtag(opts)\n if not os.path.exists(rtag_file):\n try:\n with salt.utils.files.fopen(rtag_file, 'w+'):\n pass\n except OSError as exc:\n log.warning('Encountered error writing rtag: %s', exc.__str__())\n" ]
# -*- coding: utf-8 -*- ''' Installation of packages using OS package managers such as yum or apt-get ========================================================================= .. note:: On minions running systemd>=205, as of version 2015.8.12, 2016.3.3, and 2016.11.0, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing the package manager commands spawned by Salt, when Salt updates itself (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.use_scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Salt can manage software packages via the pkg state module, packages can be set up to be installed, latest, removed and purged. Package management declarations are typically rather simple: .. code-block:: yaml vim: pkg.installed A more involved example involves pulling from a custom repository. .. code-block:: yaml base: pkgrepo.managed: - humanname: Logstash PPA - name: ppa:wolfnet/logstash - dist: precise - file: /etc/apt/sources.list.d/logstash.list - keyid: 28B04E4A - keyserver: keyserver.ubuntu.com logstash: pkg.installed: - fromrepo: ppa:wolfnet/logstash Multiple packages can also be installed with the use of the pkgs state module .. code-block:: yaml dotdeb.repo: pkgrepo.managed: - humanname: Dotdeb - name: deb http://packages.dotdeb.org wheezy-php55 all - dist: wheezy-php55 - file: /etc/apt/sources.list.d/dotbeb.list - keyid: 89DF5277 - keyserver: keys.gnupg.net - refresh_db: true php.packages: pkg.installed: - fromrepo: wheezy-php55 - pkgs: - php5-fpm - php5-cli - php5-curl .. 
warning:: Package names are currently case-sensitive. If the minion is using a package manager which is not case-sensitive (such as :mod:`pkgng <salt.modules.pkgng>`), then this state will fail if the proper case is not used. This will be addressed in a future release of Salt. ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import fnmatch import logging import os import re # Import Salt libs import salt.utils.pkg import salt.utils.platform import salt.utils.versions from salt.output import nested from salt.utils.functools import namespaced_function as _namespaced_function from salt.utils.odict import OrderedDict as _OrderedDict from salt.exceptions import ( CommandExecutionError, MinionError, SaltInvocationError ) from salt.modules.pkg_resource import _repack_pkgs # Import 3rd-party libs from salt.ext import six # pylint: disable=invalid-name _repack_pkgs = _namespaced_function(_repack_pkgs, globals()) if salt.utils.platform.is_windows(): # pylint: disable=import-error,no-name-in-module,unused-import from salt.ext.six.moves.urllib.parse import urlparse as _urlparse from salt.exceptions import SaltRenderError import collections import datetime import errno import time from functools import cmp_to_key # pylint: disable=import-error # pylint: enable=unused-import from salt.modules.win_pkg import _get_package_info from salt.modules.win_pkg import get_repo_data from salt.modules.win_pkg import _get_repo_details from salt.modules.win_pkg import _refresh_db_conditional from salt.modules.win_pkg import refresh_db from salt.modules.win_pkg import genrepo from salt.modules.win_pkg import _repo_process_pkg_sls from salt.modules.win_pkg import _get_latest_pkg_version from salt.modules.win_pkg import _reverse_cmp_pkg_versions _get_package_info = _namespaced_function(_get_package_info, globals()) get_repo_data = _namespaced_function(get_repo_data, globals()) _get_repo_details = \ _namespaced_function(_get_repo_details, globals()) 
_refresh_db_conditional = \ _namespaced_function(_refresh_db_conditional, globals()) refresh_db = _namespaced_function(refresh_db, globals()) genrepo = _namespaced_function(genrepo, globals()) _repo_process_pkg_sls = \ _namespaced_function(_repo_process_pkg_sls, globals()) _get_latest_pkg_version = \ _namespaced_function(_get_latest_pkg_version, globals()) _reverse_cmp_pkg_versions = \ _namespaced_function(_reverse_cmp_pkg_versions, globals()) # The following imports are used by the namespaced win_pkg funcs # and need to be included in their globals. # pylint: disable=import-error,unused-import from salt.utils.versions import LooseVersion # pylint: enable=import-error,unused-import # pylint: enable=invalid-name log = logging.getLogger(__name__) def __virtual__(): ''' Only make these states available if a pkg provider has been detected or assigned for this minion ''' return 'pkg.install' in __salt__ def _get_comparison_spec(pkgver): ''' Return a tuple containing the comparison operator and the version. If no comparison operator was passed, the comparison is assumed to be an "equals" comparison, and "==" will be the operator returned. ''' oper, verstr = salt.utils.pkg.split_comparison(pkgver.strip()) if oper in ('=', ''): oper = '==' return oper, verstr def _parse_version_string(version_conditions_string): ''' Returns a list of two-tuples containing (operator, version). ''' result = [] version_conditions_string = version_conditions_string.strip() if not version_conditions_string: return result for version_condition in version_conditions_string.split(','): operator_and_version = _get_comparison_spec(version_condition) result.append(operator_and_version) return result def _fulfills_version_string(installed_versions, version_conditions_string, ignore_epoch=False, allow_updates=False): ''' Returns True if any of the installed versions match the specified version conditions, otherwise returns False. 
installed_versions The installed versions version_conditions_string The string containing all version conditions. E.G. 1.2.3-4 >=1.2.3-4 >=1.2.3-4, <2.3.4-5 >=1.2.3-4, <2.3.4-5, !=1.2.4-1 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. allow_updates : False Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. (Only applicable if only one strict version condition is specified E.G. version: 2.0.6~ubuntu3) ''' version_conditions = _parse_version_string(version_conditions_string) for installed_version in installed_versions: fullfills_all = True for operator, version_string in version_conditions: if allow_updates and len(version_conditions) == 1 and operator == '==': operator = '>=' fullfills_all = fullfills_all and _fulfills_version_spec([installed_version], operator, version_string, ignore_epoch=ignore_epoch) if fullfills_all: return True return False def _fulfills_version_spec(versions, oper, desired_version, ignore_epoch=False): ''' Returns True if any of the installed versions match the specified version, otherwise returns False ''' cmp_func = __salt__.get('pkg.version_cmp') # stripping "with_origin" dict wrapper if salt.utils.platform.is_freebsd(): if isinstance(versions, dict) and 'version' in versions: versions = versions['version'] for ver in versions: if (oper == '==' and fnmatch.fnmatch(ver, desired_version)) \ or salt.utils.versions.compare(ver1=ver, oper=oper, ver2=desired_version, cmp_func=cmp_func, ignore_epoch=ignore_epoch): return True return False def _find_unpurge_targets(desired, **kwargs): ''' Find packages which are marked to be purged but can't yet be removed because they are dependencies for 
    other installed packages. These are the packages which will need to be
    'unpurged' because they are part of pkg.installed states. This really just
    applies to Debian-based Linuxes.
    '''
    # Only names present in the purge-desired package list need unpurging.
    return [
        x for x in desired
        if x in __salt__['pkg.list_pkgs'](purge_desired=True, **kwargs)
    ]


def _find_download_targets(name=None,
                           version=None,
                           pkgs=None,
                           normalize=True,
                           skip_suggestions=False,
                           ignore_epoch=False,
                           **kwargs):
    '''
    Inspect the arguments to pkg.downloaded and discover what packages need to
    be downloaded. Return a dict of packages to download.

    Returns either a dict mapping package name -> desired version (the
    download targets), or a complete state return dict when there is nothing
    to do or an error occurred.
    '''
    # NOTE(review): cur_pkgs maps package name to a dict keyed by downloaded
    # version(s) — assumed from the ``version in cver`` / ``cver.keys()``
    # usage below; confirm against pkg.list_downloaded.
    cur_pkgs = __salt__['pkg.list_downloaded'](**kwargs)
    if pkgs:
        to_download = _repack_pkgs(pkgs, normalize=normalize)

        if not to_download:
            # Badly-formatted SLS
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'Invalidly formatted pkgs parameter. See '
                               'minion log.'}
    else:
        # Single-package form: normalize the name if the provider supports it.
        if normalize:
            _normalize_name = \
                __salt__.get('pkg.normalize_name', lambda pkgname: pkgname)
            to_download = {_normalize_name(name): version}
        else:
            to_download = {name: version}

        cver = cur_pkgs.get(name, {})
        if name in to_download:
            # Package already downloaded, no need to download again
            if cver and version in cver:
                return {'name': name,
                        'changes': {},
                        'result': True,
                        'comment': 'Version {0} of package \'{1}\' is already '
                                   'downloaded'.format(version, name)}

            # if cver is not an empty string, the package is already downloaded
            elif cver and version is None:
                # The package is downloaded
                return {'name': name,
                        'changes': {},
                        'result': True,
                        'comment': 'Package {0} is already '
                                   'downloaded'.format(name)}

    version_spec = False
    if not skip_suggestions:
        # Ask the provider's package db for near-matches so we can give a
        # helpful error instead of a bare "not found".
        try:
            problems = _preflight_check(to_download, **kwargs)
        except CommandExecutionError:
            # Best-effort: suggestions are advisory only, ignore failures here.
            pass
        else:
            comments = []
            if problems.get('no_suggest'):
                comments.append(
                    'The following package(s) were not found, and no '
                    'possible matches were found in the package db: '
                    '{0}'.format(
                        ', '.join(sorted(problems['no_suggest']))
                    )
                )
            if problems.get('suggest'):
                for pkgname, suggestions in \
                        six.iteritems(problems['suggest']):
                    comments.append(
                        'Package \'{0}\' not found (possible matches: '
                        '{1})'.format(pkgname, ', '.join(suggestions))
                    )
            if comments:
                if len(comments) > 1:
                    comments.append('')
                return {'name': name,
                        'changes': {},
                        'result': False,
                        'comment': '. '.join(comments).rstrip()}

    # Find out which packages will be targeted in the call to pkg.download
    # Check current downloaded versions against specified versions
    targets = {}
    problems = []
    for pkgname, pkgver in six.iteritems(to_download):
        cver = cur_pkgs.get(pkgname, {})
        # Package not yet downloaded, so add to targets
        if not cver:
            targets[pkgname] = pkgver
            continue
        # No version specified but package is already downloaded
        elif cver and not pkgver:
            continue

        version_spec = True
        try:
            # cver.keys() are the already-downloaded versions for this package
            if not _fulfills_version_string(cver.keys(), pkgver,
                                            ignore_epoch=ignore_epoch):
                targets[pkgname] = pkgver
        except CommandExecutionError as exc:
            problems.append(exc.strerror)
            continue

    if problems:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': ' '.join(problems)}

    if not targets:
        # All specified packages are already downloaded
        msg = (
            'All specified packages{0} are already downloaded'
            .format(' (matching specified versions)' if version_spec else '')
        )
        return {'name': name,
                'changes': {},
                'result': True,
                'comment': msg}

    return targets


def _find_advisory_targets(name=None,
                           advisory_ids=None,
                           **kwargs):
    '''
    Inspect the arguments to pkg.patch_installed and discover what advisory
    patches need to be installed. Return a dict of advisory patches to install.
    '''
    cur_patches = __salt__['pkg.list_installed_patches'](**kwargs)
    if advisory_ids:
        to_download = advisory_ids
    else:
        to_download = [name]
        # The "already installed" short-circuit only applies when the single
        # state name is used as the advisory ID (no explicit advisory_ids).
        if cur_patches.get(name, {}):
            # Advisory patch already installed, no need to install it again
            return {'name': name,
                    'changes': {},
                    'result': True,
                    'comment': 'Advisory patch {0} is already '
                               'installed'.format(name)}

    # Find out which advisory patches will be targeted in the call to pkg.install
    targets = []
    for patch_name in to_download:
        cver = cur_patches.get(patch_name, {})
        # Advisory patch not yet installed, so add to targets
        if not cver:
            targets.append(patch_name)
            continue

    if not targets:
        # All specified advisory patches are already installed
        msg = ('All specified advisory patches are already installed')
        return {'name': name,
                'changes': {},
                'result': True,
                'comment': msg}

    return targets


def _find_remove_targets(name=None,
                         version=None,
                         pkgs=None,
                         normalize=True,
                         ignore_epoch=False,
                         **kwargs):
    '''
    Inspect the arguments to pkg.removed and discover what packages need to
    be removed. Return a dict of packages to remove.

    Returns either a list of package names to remove, or a complete state
    return dict when there is nothing to do or an error occurred.
    '''
    # FreeBSD needs origin info so `category/portname` style names can match.
    if __grains__['os'] == 'FreeBSD':
        kwargs['with_origin'] = True
    cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
    if pkgs:
        to_remove = _repack_pkgs(pkgs, normalize=normalize)

        if not to_remove:
            # Badly-formatted SLS
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'Invalidly formatted pkgs parameter. See '
                               'minion log.'}
    else:
        _normalize_name = \
            __salt__.get('pkg.normalize_name', lambda pkgname: pkgname)
        to_remove = {_normalize_name(name): version}

    version_spec = False
    # Find out which packages will be targeted in the call to pkg.remove
    # Check current versions against specified versions
    targets = []
    problems = []
    for pkgname, pkgver in six.iteritems(to_remove):
        # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names
        origin = bool(re.search('/', pkgname))

        if __grains__['os'] == 'FreeBSD' and origin:
            cver = [k for k, v in six.iteritems(cur_pkgs)
                    if v['origin'] == pkgname]
        else:
            cver = cur_pkgs.get(pkgname, [])

        # Package not installed, no need to remove
        if not cver:
            continue
        # No version specified and pkg is installed
        elif __salt__['pkg_resource.version_clean'](pkgver) is None:
            targets.append(pkgname)
            continue
        version_spec = True
        try:
            # Only remove if the installed version matches the requested
            # version specification.
            if _fulfills_version_string(cver, pkgver,
                                        ignore_epoch=ignore_epoch):
                targets.append(pkgname)
            else:
                log.debug(
                    'Current version (%s) did not match desired version '
                    'specification (%s), will not remove', cver, pkgver
                )
        except CommandExecutionError as exc:
            problems.append(exc.strerror)
            continue

    if problems:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': ' '.join(problems)}

    if not targets:
        # All specified packages are already absent
        msg = 'All specified packages{0} are already absent'.format(
            ' (matching specified versions)' if version_spec else ''
        )
        return {'name': name,
                'changes': {},
                'result': True,
                'comment': msg}

    return targets


def _find_install_targets(name=None,
                          version=None,
                          pkgs=None,
                          sources=None,
                          skip_suggestions=False,
                          pkg_verify=False,
                          normalize=True,
                          ignore_epoch=False,
                          reinstall=False,
                          refresh=False,
                          **kwargs):
    '''
    Inspect the arguments to pkg.installed and discover what packages need to
    be installed.
    Return a dict of desired packages

    On success returns the tuple
    ``(desired, targets, to_unpurge, to_reinstall, altered_files, warnings,
    was_refreshed)``; otherwise returns a complete state return dict (error,
    or everything already installed).
    '''
    was_refreshed = False

    if all((pkgs, sources)):
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'Only one of "pkgs" and "sources" is permitted.'}

    # dict for packages that fail pkg.verify and their altered files
    altered_files = {}
    # Get the ignore_types list if any from the pkg_verify argument
    if isinstance(pkg_verify, list) \
            and any(x.get('ignore_types') is not None
                    for x in pkg_verify
                    if isinstance(x, _OrderedDict)
                    and 'ignore_types' in x):
        ignore_types = next(x.get('ignore_types')
                            for x in pkg_verify
                            if 'ignore_types' in x)
    else:
        ignore_types = []

    # Get the verify_options list if any from the pkg_verify argument
    if isinstance(pkg_verify, list) \
            and any(x.get('verify_options') is not None
                    for x in pkg_verify
                    if isinstance(x, _OrderedDict)
                    and 'verify_options' in x):
        verify_options = next(x.get('verify_options')
                              for x in pkg_verify
                              if 'verify_options' in x)
    else:
        verify_options = []

    if __grains__['os'] == 'FreeBSD':
        kwargs['with_origin'] = True

    if salt.utils.platform.is_windows():
        # Windows requires a refresh to establish a pkg db if refresh=True, so
        # add it to the kwargs.
        kwargs['refresh'] = refresh

    # Capability resolution is only possible when the provider implements
    # pkg.list_provides.
    resolve_capabilities = kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__
    try:
        cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
        cur_prov = resolve_capabilities and __salt__['pkg.list_provides'](**kwargs) or dict()
    except CommandExecutionError as exc:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': exc.strerror}

    if salt.utils.platform.is_windows() and kwargs.pop('refresh', False):
        # We already refreshed when we called pkg.list_pkgs
        was_refreshed = True
        refresh = False

    if any((pkgs, sources)):
        if pkgs:
            desired = _repack_pkgs(pkgs, normalize=normalize)
        elif sources:
            desired = __salt__['pkg_resource.pack_sources'](
                sources,
                normalize=normalize,
            )

        if not desired:
            # Badly-formatted SLS
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'Invalidly formatted \'{0}\' parameter. See '
                               'minion log.'.format('pkgs' if pkgs
                                                    else 'sources')}

        to_unpurge = _find_unpurge_targets(desired, **kwargs)
    else:
        if salt.utils.platform.is_windows():
            pkginfo = _get_package_info(name, saltenv=kwargs['saltenv'])
            if not pkginfo:
                return {'name': name,
                        'changes': {},
                        'result': False,
                        'comment': 'Package {0} not found in the '
                                   'repository.'.format(name)}
            if version is None:
                version = _get_latest_pkg_version(pkginfo)

        if normalize:
            _normalize_name = \
                __salt__.get('pkg.normalize_name', lambda pkgname: pkgname)
            desired = {_normalize_name(name): version}
        else:
            desired = {name: version}

        to_unpurge = _find_unpurge_targets(desired, **kwargs)

        # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names
        origin = bool(re.search('/', name))

        if __grains__['os'] == 'FreeBSD' and origin:
            cver = [k for k, v in six.iteritems(cur_pkgs)
                    if v['origin'] == name]
        else:
            cver = cur_pkgs.get(name, [])

        if name not in to_unpurge:
            if version and version in cver \
                    and not reinstall \
                    and not pkg_verify:
                # The package is installed and is the correct version
                return {'name': name,
                        'changes': {},
                        'result': True,
                        'comment': 'Version {0} of package \'{1}\' is already '
                                   'installed'.format(version, name)}

            # if cver is not an empty string, the package is already installed
            elif cver and version is None \
                    and not reinstall \
                    and not pkg_verify:
                # The package is installed
                return {'name': name,
                        'changes': {},
                        'result': True,
                        'comment': 'Package {0} is already '
                                   'installed'.format(name)}

    version_spec = False
    if not sources:
        # Check for alternate package names if strict processing is not
        # enforced. Takes extra time. Disable for improved performance
        if not skip_suggestions:
            # Perform platform-specific pre-flight checks
            not_installed = dict([
                (name, version)
                for name, version in desired.items()
                if not (name in cur_pkgs and
                        (version is None or
                         _fulfills_version_string(cur_pkgs[name], version)))
            ])
            if not_installed:
                try:
                    problems = _preflight_check(not_installed, **kwargs)
                except CommandExecutionError:
                    # Suggestions are advisory only; ignore lookup failures.
                    pass
                else:
                    comments = []
                    if problems.get('no_suggest'):
                        comments.append(
                            'The following package(s) were not found, and no '
                            'possible matches were found in the package db: '
                            '{0}'.format(
                                ', '.join(sorted(problems['no_suggest']))
                            )
                        )
                    if problems.get('suggest'):
                        for pkgname, suggestions in \
                                six.iteritems(problems['suggest']):
                            comments.append(
                                'Package \'{0}\' not found (possible matches: '
                                '{1})'.format(pkgname, ', '.join(suggestions))
                            )
                    if comments:
                        if len(comments) > 1:
                            comments.append('')
                        return {'name': name,
                                'changes': {},
                                'result': False,
                                'comment': '. '.join(comments).rstrip()}

    # Resolve the latest package version for any packages with "latest" in the
    # package version
    wants_latest = [] \
        if sources \
        else [x for x, y in six.iteritems(desired) if y == 'latest']
    if wants_latest:
        resolved_latest = __salt__['pkg.latest_version'](*wants_latest,
                                                         refresh=refresh,
                                                         **kwargs)
        # pkg.latest_version returns a bare string for a single package, a
        # dict for multiple; normalize to a dict.
        if len(wants_latest) == 1:
            resolved_latest = {wants_latest[0]: resolved_latest}
        if refresh:
            was_refreshed = True
            refresh = False

        # pkg.latest_version returns an empty string when the package is
        # up-to-date. So check the currently-installed packages. If found, the
        # resolved latest version will be the currently installed one from
        # cur_pkgs. If not found, then the package doesn't exist and the
        # resolved latest version will be None.
        for key in resolved_latest:
            if not resolved_latest[key]:
                if key in cur_pkgs:
                    resolved_latest[key] = cur_pkgs[key][-1]
                else:
                    resolved_latest[key] = None
        # Update the desired versions with the ones we resolved
        desired.update(resolved_latest)

    # Find out which packages will be targeted in the call to pkg.install
    targets = {}
    to_reinstall = {}
    problems = []
    warnings = []
    failed_verify = False
    for package_name, version_string in six.iteritems(desired):
        cver = cur_pkgs.get(package_name, [])
        if resolve_capabilities and not cver and package_name in cur_prov:
            # The name is a capability; look up the real package providing it.
            cver = cur_pkgs.get(cur_prov.get(package_name)[0], [])

        # Package not yet installed, so add to targets
        if not cver:
            targets[package_name] = version_string
            continue

        if sources:
            if reinstall:
                to_reinstall[package_name] = version_string
                continue
            elif 'lowpkg.bin_pkg_info' not in __salt__:
                continue
            # Metadata parser is available, cache the file and derive the
            # package's name and version
            err = 'Unable to cache {0}: {1}'
            try:
                cached_path = __salt__['cp.cache_file'](version_string,
                                                        saltenv=kwargs['saltenv'])
            except CommandExecutionError as exc:
                problems.append(err.format(version_string, exc))
                continue
            if not cached_path:
                problems.append(err.format(version_string, 'file not found'))
                continue
            elif not os.path.exists(cached_path):
                problems.append('{0} does not exist on minion'.format(version_string))
                continue
            source_info = __salt__['lowpkg.bin_pkg_info'](cached_path)
            if source_info is None:
                warnings.append('Failed to parse metadata for {0}'.format(version_string))
                continue
            else:
                verstr = source_info['version']
        else:
            verstr = version_string
            if reinstall:
                to_reinstall[package_name] = version_string
                continue
            if not __salt__['pkg_resource.check_extra_requirements'](package_name, version_string):
                targets[package_name] = version_string
                continue
            # No version specified and pkg is installed
            elif __salt__['pkg_resource.version_clean'](version_string) is None:
                if (not reinstall) and pkg_verify:
                    try:
                        verify_result = __salt__['pkg.verify'](
                            package_name,
                            ignore_types=ignore_types,
                            verify_options=verify_options,
                            **kwargs
                        )
                    except (CommandExecutionError, SaltInvocationError) as exc:
                        failed_verify = exc.strerror
                        continue
                    if verify_result:
                        # Files have been altered; force a reinstall.
                        to_reinstall[package_name] = version_string
                        altered_files[package_name] = verify_result
                continue

        version_fulfilled = False
        allow_updates = bool(not sources and kwargs.get('allow_updates'))
        try:
            version_fulfilled = _fulfills_version_string(cver, verstr,
                                                         ignore_epoch=ignore_epoch,
                                                         allow_updates=allow_updates)
        except CommandExecutionError as exc:
            problems.append(exc.strerror)
            continue

        # Compare desired version against installed version.
        version_spec = True
        if not version_fulfilled:
            if reinstall:
                to_reinstall[package_name] = version_string
            else:
                version_conditions = _parse_version_string(version_string)
                # pkg.verify is only meaningful for an exact ('==') version
                # condition; otherwise this is a plain install/upgrade target.
                if pkg_verify and any(oper == '==' for oper, version in version_conditions):
                    try:
                        verify_result = __salt__['pkg.verify'](
                            package_name,
                            ignore_types=ignore_types,
                            verify_options=verify_options,
                            **kwargs)
                    except (CommandExecutionError, SaltInvocationError) as exc:
                        failed_verify = exc.strerror
                        continue
                    if verify_result:
                        to_reinstall[package_name] = version_string
                        altered_files[package_name] = verify_result
                else:
                    log.debug(
                        'Current version (%s) did not match desired version '
                        'specification (%s), adding to installation targets',
                        cver, version_string
                    )
                    targets[package_name] = version_string

    if failed_verify:
        problems.append(failed_verify)

    if problems:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': ' '.join(problems)}

    if not any((targets, to_unpurge, to_reinstall)):
        # All specified packages are installed
        msg = 'All specified packages are already installed{0}'
        msg = msg.format(
            ' and are at the desired version' if version_spec and not sources
            else ''
        )
        ret = {'name': name,
               'changes': {},
               'result': True,
               'comment': msg}
        if warnings:
            ret.setdefault('warnings', []).extend(warnings)
        return ret

    return (desired, targets, to_unpurge, to_reinstall, altered_files,
            warnings, was_refreshed)


def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None):
    '''
    Determine whether or not the installed packages match what was requested in
    the SLS file.

    Returns a 2-tuple ``(ok, failed)`` of package-name lists.
    '''
    ok = []
    failed = []
    if not new_caps:
        new_caps = dict()
    for pkgname, pkgver in desired.items():
        # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names.
        # Homebrew for Mac OSX does something similar with tap names
        # prefixing package names, separated with a slash.
        has_origin = '/' in pkgname

        if __grains__['os'] == 'FreeBSD' and has_origin:
            cver = [k for k, v in six.iteritems(new_pkgs) if v['origin'] == pkgname]
        elif __grains__['os'] == 'MacOS' and has_origin:
            cver = new_pkgs.get(pkgname, new_pkgs.get(pkgname.split('/')[-1]))
        elif __grains__['os'] == 'OpenBSD':
            cver = new_pkgs.get(pkgname.split('%')[0])
        elif __grains__['os_family'] == 'Debian':
            cver = new_pkgs.get(pkgname.split('=')[0])
        else:
            cver = new_pkgs.get(pkgname)

        # Fall back to capability resolution when the literal name is absent.
        if not cver and pkgname in new_caps:
            cver = new_pkgs.get(new_caps.get(pkgname)[0])

        if not cver:
            failed.append(pkgname)
            continue
        elif pkgver == 'latest':
            ok.append(pkgname)
            continue
        elif not __salt__['pkg_resource.version_clean'](pkgver):
            # No concrete version requested; presence is enough.
            ok.append(pkgname)
            continue
        elif pkgver.endswith("*") and cver[0].startswith(pkgver[:-1]):
            # Trailing-wildcard version matched by simple prefix comparison.
            ok.append(pkgname)
            continue
        if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch):
            ok.append(pkgname)
        else:
            failed.append(pkgname)
    return ok, failed


def _get_desired_pkg(name, desired):
    '''
    Helper function that retrieves and nicely formats the desired pkg (and
    version if specified) so that helpful information can be printed in the
    comment for the state.
    '''
    # Only prepend '=' for a plain version string; comparison operators
    # already carry their own operator.
    if not desired[name] or desired[name].startswith(('<', '>', '=')):
        oper = ''
    else:
        oper = '='
    return '{0}{1}{2}'.format(name, oper,
                              '' if not desired[name] else desired[name])


def _preflight_check(desired, fromrepo, **kwargs):
    '''
    Perform platform-specific checks on desired packages

    Returns a dict with two keys: ``suggest`` (name -> list of suggested
    alternatives) and ``no_suggest`` (names with no match at all). Returns
    an empty dict when the provider has no ``pkg.check_db``.
    '''
    if 'pkg.check_db' not in __salt__:
        return {}
    ret = {'suggest': {}, 'no_suggest': []}
    pkginfo = __salt__['pkg.check_db'](
        *list(desired.keys()), fromrepo=fromrepo, **kwargs
    )
    for pkgname in pkginfo:
        if pkginfo[pkgname]['found'] is False:
            if pkginfo[pkgname]['suggestions']:
                ret['suggest'][pkgname] = pkginfo[pkgname]['suggestions']
            else:
                ret['no_suggest'].append(pkgname)
    return ret


def _nested_output(obj):
    '''
    Serialize obj and format for output
    '''
    # The nested outputter reads configuration from its module-level __opts__.
    nested.__opts__ = __opts__
    ret = nested.output(obj).rstrip()
    return ret


def _resolve_capabilities(pkgs, refresh=False, **kwargs):
    '''
    Resolve capabilities in ``pkgs`` and exchange them with real package
    names, when the result is distinct.
    This feature can be turned on while setting the parameter
    ``resolve_capabilities`` to True.

    Return the input dictionary with replaced capability names and as
    second return value a bool which says if a refresh needs to be run.

    In case of ``resolve_capabilities`` is False (disabled) or not
    supported by the implementation the input is returned unchanged.
    '''
    if not pkgs or 'pkg.resolve_capabilities' not in __salt__:
        return pkgs, refresh

    # The provider consumes the refresh flag here, so the caller no longer
    # needs to refresh (hence the returned False).
    ret = __salt__['pkg.resolve_capabilities'](pkgs, refresh=refresh, **kwargs)
    return ret, False


def installed(
        name,
        version=None,
        refresh=None,
        fromrepo=None,
        skip_verify=False,
        skip_suggestions=False,
        pkgs=None,
        sources=None,
        allow_updates=False,
        pkg_verify=False,
        normalize=True,
        ignore_epoch=False,
        reinstall=False,
        update_holds=False,
        bypass_file=None,
        bypass_file_contains=None,
        **kwargs):
    '''
    Ensure that the package is installed, and that it is the correct version
    (if specified).

    :param str name:
        The name of the package to be installed.
This parameter is ignored if either "pkgs" or "sources" is used. Additionally, please note that this option can only be used to install packages from a software repository. To install a package file manually, use the "sources" option detailed below. :param str version: Install a specific version of a package. This option is ignored if "sources" is used. Currently, this option is supported for the following pkg providers: :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`pkgin <salt.modules.pkgin>`, :mod:`win_pkg <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`. The version number includes the release designation where applicable, to allow Salt to target a specific release of a given version. When in doubt, using the ``pkg.latest_version`` function for an uninstalled package will tell you the version available. .. code-block:: bash # salt myminion pkg.latest_version vim-enhanced myminion: 2:7.4.160-1.el7 .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the ``pkg.latest_version`` output above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. 
Also, while this function is not yet implemented for all pkg frontends, :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` will show all versions available in the various repositories for a given package, irrespective of whether or not it is installed. .. code-block:: bash # salt myminion pkg.list_repo_pkgs bash myminion: ---------- bash: - 4.2.46-21.el7_3 - 4.2.46-20.el7_2 This function was first added for :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` in 2014.1.0, and was expanded to :py:func:`Debian/Ubuntu <salt.modules.aptpkg.list_repo_pkgs>` and :py:func:`Arch Linux <salt.modules.pacman.list_repo_pkgs>`-based distros in the 2017.7.0 release. The version strings returned by either of these functions can be used as version specifiers in pkg states. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.installed: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 If the version given is the string ``latest``, the latest available package version will be installed à la ``pkg.latest``. **WILDCARD VERSIONS** As of the 2017.7.0 release, this state now supports wildcards in package versions for SUSE SLES/Leap/Tumbleweed, Debian/Ubuntu, RHEL/CentOS, Arch Linux, and their derivatives. Using wildcards can be useful for packages where the release name is built into the version in some way, such as for RHEL/CentOS which typically has version numbers like ``1.2.34-5.el7``. An example of the usage for this would be: .. code-block:: yaml mypkg: pkg.installed: - version: '1.2.34*' Keep in mind that using wildcard versions will result in a slower state run since Salt must gather the available versions of the specified packages and figure out which of them match the specified wildcard expression. :param bool refresh: This parameter controls whether or not the package repo database is updated prior to installing the requested package(s). 
If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before installing. If ``False``, the package database will *not* be refreshed before installing. If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - cache_valid_time: 300 - allow_updates: True - hold: False In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param str fromrepo: Specify a repository from which to install .. note:: Distros which use APT (Debian, Ubuntu, etc.) do not have a concept of repositories, in the same way as YUM-based distros do. When a source is added, it is assigned to a given release. Consider the following source configuration: .. code-block:: text deb http://ppa.launchpad.net/saltstack/salt/ubuntu precise main The packages provided by this source would be made available via the ``precise`` release, therefore ``fromrepo`` would need to be set to ``precise`` for Salt to install the package from this source. 
Having multiple sources in the same release may result in the default install candidate being newer than what is desired. If this is the case, the desired version must be specified using the ``version`` parameter. If the ``pkgs`` parameter is being used to install multiple packages in the same state, then instead of using ``version``, use the method of version specification described in the **Multiple Package Installation Options** section below. Running the shell command ``apt-cache policy pkgname`` on a minion can help elucidate the APT configuration and aid in properly configuring states: .. code-block:: bash root@saltmaster:~# salt ubuntu01 cmd.run 'apt-cache policy ffmpeg' ubuntu01: ffmpeg: Installed: (none) Candidate: 7:0.10.11-1~precise1 Version table: 7:0.10.11-1~precise1 0 500 http://ppa.launchpad.net/jon-severinsson/ffmpeg/ubuntu/ precise/main amd64 Packages 4:0.8.10-0ubuntu0.12.04.1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise-updates/main amd64 Packages 500 http://security.ubuntu.com/ubuntu/ precise-security/main amd64 Packages 4:0.8.1-0ubuntu1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise/main amd64 Packages The release is located directly after the source's URL. The actual release name is the part before the slash, so to install version **4:0.8.10-0ubuntu0.12.04.1** either ``precise-updates`` or ``precise-security`` could be used for the ``fromrepo`` value. :param bool skip_verify: Skip the GPG verification check for the package to be installed :param bool skip_suggestions: Force strict package naming. Disables lookup of package alternatives. .. versionadded:: 2014.1.1 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 :param bool allow_updates: Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). 
This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - allow_updates: True - hold: False :param bool pkg_verify: .. versionadded:: 2014.7.0 For requested packages that are already installed and would not be targeted for upgrade or downgrade, use pkg.verify to determine if any of the files installed by the package have been altered. If files have been altered, the reinstall option of pkg.install is used to force a reinstall. Types to ignore can be passed to pkg.verify. Additionally, ``verify_options`` can be used to modify further the behavior of pkg.verify. See examples below. Currently, this option is supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>`. Examples: .. code-block:: yaml httpd: pkg.installed: - version: 2.2.15-30.el6.centos - pkg_verify: True .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc - verify_options: - nodeps - nofiledigest :param list ignore_types: List of types to ignore when verifying the package .. versionadded:: 2014.7.0 :param list verify_options: List of additional options to pass when verifying the package. These options will be added to the ``rpm -V`` command, prepended with ``--`` (for example, when ``nodeps`` is passed in this option, ``rpm -V`` will be run with ``--nodeps``). .. versionadded:: 2016.11.0 :param bool normalize: Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml gpfs.gplbin-2.6.32-279.31.1.el6.x86_64: pkg.installed: - normalize: False :param bool ignore_epoch: When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.installed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the package would be installed, but the state would report as failed because the actual installed version would be ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 | **MULTIPLE PACKAGE INSTALLATION OPTIONS: (not supported in pkgng)** :param list pkgs: A list of packages to install from a software repository. All packages listed under ``pkgs`` will be installed via a single command. .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar - baz - hold: True ``NOTE:`` For :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`winrepo <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`, version numbers can be specified in the ``pkgs`` argument. For example: .. 
code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz Additionally, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`zypper <salt.modules.zypper>`, :mod:`yum/dnf <salt.modules.yumpkg>`, and :mod:`apt <salt.modules.aptpkg>` support the ``<``, ``<=``, ``>=``, and ``>`` operators for more control over what versions will be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: '>=1.2.3-4' - baz ``NOTE:`` When using comparison operators, the expression must be enclosed in quotes to avoid a YAML render error. With :mod:`ebuild <salt.modules.ebuild>` is also possible to specify a use flag list and/or if the given packages should be in package.accept_keywords file and/or the overlay from which you want the package to be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo: '~' - bar: '~>=1.2:slot::overlay[use,-otheruse]' - baz :param list sources: A list of packages to install, along with the source URI or local path from which to install each package. In the example below, ``foo``, ``bar``, ``baz``, etc. refer to the name of the package, as it would appear in the output of the ``pkg.version`` or ``pkg.list_pkgs`` salt CLI commands. .. code-block:: yaml mypkgs: pkg.installed: - sources: - foo: salt://rpms/foo.rpm - bar: http://somesite.org/bar.rpm - baz: ftp://someothersite.org/baz.rpm - qux: /minion/path/to/qux.rpm **PLATFORM-SPECIFIC ARGUMENTS** These are specific to each OS. If it does not apply to the execution module for your OS, it is ignored. :param bool hold: Force the package to be held at the current installed version. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2014.7.0 :param bool update_holds: If ``True``, and this function would update the package version, any packages which are being held will be temporarily unheld so that they can be updated. 
Otherwise, if this function attempts to update a held package, the held package(s) will be skipped and the state will fail. By default, this parameter is set to ``False``. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2016.11.0 :param list names: A list of packages to install from a software repository. Each package will be installed individually by the package manager. .. warning:: Unlike ``pkgs``, the ``names`` parameter cannot specify a version. In addition, it makes a separate call to the package management frontend to install each package, whereas ``pkgs`` makes just a single call. It is therefore recommended to use ``pkgs`` instead of ``names`` to install multiple packages, both for the additional features and the performance improvement that it brings. :param bool install_recommends: Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - install_recommends: False :param bool only_upgrade: Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. :param bool report_reboot_exit_codes: If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. 
code-block:: yaml

            ms vcpp installed:
              pkg.installed:
                - name: ms-vcpp
                - version: 10.0.40219
                - report_reboot_exit_codes: False

    :param str bypass_file:
        If you wish to bypass the full package validation process, you can
        specify a file related to the installed package as a way to validate
        the package has already been installed. A good example would be a
        config file that is deployed with the package. Another bypass_file
        could be ``/run/salt-minion.pid``.

        .. code-block:: yaml

            install_ntp:
              pkg.installed:
                - name: ntp
                - bypass_file: /etc/ntp.conf

        The use case for this feature is when running salt at significant
        scale. Each state that has a requisite for a ``pkg.installed`` will
        have salt querying the package manager of the system. Compared to
        simple diff checks, querying the package manager is a lengthy process.
        This feature is an attempt to reduce the run time of states. If only a
        config change is being made but you wish to keep all of the self
        resolving requisites this bypasses the lengthy cost of the package
        manager. The assumption is that if this file is present, the package
        should already be installed.

    :param str bypass_file_contains:
        This option can only be used in conjunction with the ``bypass_file``
        option. It is to provide a second layer of validation before bypassing
        the ``pkg.installed`` process.

        .. code-block:: yaml

            install_ntp:
              pkg.installed:
                - name: ntp
                - bypass_file: /etc/ntp.conf
                - bypass_file_contains: version-20181218

        This will have salt check to see if the file contains the specified
        string. If the value is found, the ``pkg.installed`` process will be
        bypassed under the assumption that two pieces of validation have
        passed and the package is already installed.

        .. warning::
            Do not try and use ``{{ salt['pkg.version']('ntp') }}`` in a jinja
            template as part of your bypass_file_contains match. This will
            trigger a ``pkg.version`` lookup with the package manager and
            negate any time saved by trying to use the bypass feature.
:return: A dictionary containing the state of the software installation :rtype dict: .. note:: The ``pkg.installed`` state supports the usage of ``reload_modules``. This functionality allows you to force Salt to reload all modules. In many cases, Salt is clever enough to transparently reload the modules. For example, if you install a package, Salt reloads modules because some other module or state might require the package which was installed. However, there are some edge cases where this may not be the case, which is what ``reload_modules`` is meant to resolve. You should only use ``reload_modules`` if your ``pkg.installed`` does some sort of installation where if you do not reload the modules future items in your state which rely on the software being installed will fail. Please see the :ref:`Reloading Modules <reloading-modules>` documentation for more information. ''' if not pkgs and isinstance(pkgs, list): return {'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided'} # If just a name (and optionally a version) is passed, just pack them into # the pkgs argument. 
if name and not any((pkgs, sources)): if version: pkgs = [{name: version}] version = None else: pkgs = [name] kwargs['saltenv'] = __env__ refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if bypass_file is not None and bypass_file_contains is not None: if os.path.isfile(bypass_file): with salt.utils.fopen(bypass_file) as bypass_file_open: if bypass_file_contains in bypass_file_open.read(): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as {} was present in {}'.format(bypass_file_contains, bypass_file)} if bypass_file is not None and bypass_file_contains is None: if os.path.isfile(bypass_file): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as bypass_file {} was present'.format(bypass_file)} # check if capabilities should be checked and modify the requested packages # accordingly. if pkgs: pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) if not isinstance(pkg_verify, list): pkg_verify = pkg_verify is True if (pkg_verify or isinstance(pkg_verify, list)) \ and 'pkg.verify' not in __salt__: return {'name': name, 'changes': {}, 'result': False, 'comment': 'pkg.verify not implemented'} if not isinstance(version, six.string_types) and version is not None: version = six.text_type(version) kwargs['allow_updates'] = allow_updates result = _find_install_targets(name, version, pkgs, sources, fromrepo=fromrepo, skip_suggestions=skip_suggestions, pkg_verify=pkg_verify, normalize=normalize, ignore_epoch=ignore_epoch, reinstall=reinstall, refresh=refresh, **kwargs) try: (desired, targets, to_unpurge, to_reinstall, altered_files, warnings, was_refreshed) = result if was_refreshed: refresh = False except ValueError: # _find_install_targets() found no targets or encountered an error # check that the hold function is available if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( 
name=name, pkgs=pkgs, sources=sources ) except (CommandExecutionError, SaltInvocationError) as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': six.text_type(exc)} if 'result' in hold_ret and not hold_ret['result']: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] for i in modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] result['changes'][i['name']] = i['changes'] for i in not_modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] for i in failed_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] return result if to_unpurge and 'lowpkg.unpurge' not in __salt__: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'lowpkg.unpurge not implemented'} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret # Remove any targets not returned by _find_install_targets if pkgs: pkgs = [dict([(x, y)]) for x, y in six.iteritems(targets)] pkgs.extend([dict([(x, y)]) for x, y in six.iteritems(to_reinstall)]) elif sources: oldsources = sources sources = [x for x in oldsources if next(iter(list(x.keys()))) in targets] sources.extend([x for x in oldsources if next(iter(list(x.keys()))) in to_reinstall]) comment = [] if __opts__['test']: if targets: if sources: summary = ', '.join(targets) else: summary = ', '.join([_get_desired_pkg(x, targets) for x in targets]) comment.append('The following packages would be ' 'installed/updated: {0}'.format(summary)) if to_unpurge: comment.append( 'The following packages would have their selection 
status ' 'changed from \'purge\' to \'install\': {0}' .format(', '.join(to_unpurge)) ) if to_reinstall: # Add a comment for each package in to_reinstall with its # pkg.verify output if reinstall: reinstall_targets = [] for reinstall_pkg in to_reinstall: if sources: reinstall_targets.append(reinstall_pkg) else: reinstall_targets.append( _get_desired_pkg(reinstall_pkg, to_reinstall) ) msg = 'The following packages would be reinstalled: ' msg += ', '.join(reinstall_targets) comment.append(msg) else: for reinstall_pkg in to_reinstall: if sources: pkgstr = reinstall_pkg else: pkgstr = _get_desired_pkg(reinstall_pkg, to_reinstall) comment.append( 'Package \'{0}\' would be reinstalled because the ' 'following files have been altered:'.format(pkgstr) ) comment.append( _nested_output(altered_files[reinstall_pkg]) ) ret = {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret changes = {'installed': {}} modified_hold = None not_modified_hold = None failed_hold = None if targets or to_reinstall: try: pkg_ret = __salt__['pkg.install'](name=None, refresh=refresh, version=version, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=pkgs, sources=sources, reinstall=bool(to_reinstall), normalize=normalize, update_holds=update_holds, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while installing ' 'package(s): {0}'.format(exc)) if warnings: ret.setdefault('warnings', []).extend(warnings) return ret if refresh: refresh = False if isinstance(pkg_ret, dict): changes['installed'].update(pkg_ret) elif isinstance(pkg_ret, six.string_types): comment.append(pkg_ret) # Code below will be looking for a dictionary. If this is a string # it means that there was an exception raised and that no packages # changed, so now that we have added this error to the comments we # set this to an empty dictionary so that the code below which # checks reinstall targets works. pkg_ret = {} if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( name=name, pkgs=desired ) except (CommandExecutionError, SaltInvocationError) as exc: comment.append(six.text_type(exc)) ret = {'name': name, 'changes': changes, 'result': False, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: if 'result' in hold_ret and not hold_ret['result']: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] if to_unpurge: changes['purge_desired'] = __salt__['lowpkg.unpurge'](*to_unpurge) # Analyze pkg.install results for packages in targets if sources: modified = [x for x in changes['installed'] if x in targets] not_modified = [x for x in desired if x not in targets 
and x not in to_reinstall] failed = [x for x in targets if x not in modified] else: if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True new_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__: new_caps = __salt__['pkg.list_provides'](**kwargs) else: new_caps = {} ok, failed = _verify_install(desired, new_pkgs, ignore_epoch=ignore_epoch, new_caps=new_caps) modified = [x for x in ok if x in targets] not_modified = [x for x in ok if x not in targets and x not in to_reinstall] failed = [x for x in failed if x in targets] # If there was nothing unpurged, just set the changes dict to the contents # of changes['installed']. if not changes.get('purge_desired'): changes = changes['installed'] if modified: if sources: summary = ', '.join(modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in modified]) if len(summary) < 20: comment.append('The following packages were installed/updated: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} installed/updated.'.format( len(modified), 's' if len(modified) > 1 else '', 'were' if len(modified) > 1 else 'was' ) ) if modified_hold: for i in modified_hold: change_name = i['name'] if change_name in changes: comment.append(i['comment']) if changes[change_name]['new']: changes[change_name]['new'] += '\n' changes[change_name]['new'] += '{0}'.format(i['changes']['new']) if changes[change_name]['old']: changes[change_name]['old'] += '\n' changes[change_name]['old'] += '{0}'.format(i['changes']['old']) else: comment.append(i['comment']) changes[change_name] = {} changes[change_name]['new'] = '{0}'.format(i['changes']['new']) # Any requested packages that were not targeted for install or reinstall if not_modified: if sources: summary = ', '.join(not_modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in not_modified]) if len(not_modified) <= 20: comment.append('The following 
packages were already installed: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} already installed'.format( len(not_modified), 's' if len(not_modified) > 1 else '', 'were' if len(not_modified) > 1 else 'was' ) ) if not_modified_hold: for i in not_modified_hold: comment.append(i['comment']) result = True if failed: if sources: summary = ', '.join(failed) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in failed]) comment.insert(0, 'The following packages failed to ' 'install/update: {0}'.format(summary)) result = False if failed_hold: for i in failed_hold: comment.append(i['comment']) result = False # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] # Rerun pkg.verify for packages in to_reinstall to determine failed modified = [] failed = [] for reinstall_pkg in to_reinstall: if reinstall: if reinstall_pkg in pkg_ret: modified.append(reinstall_pkg) else: failed.append(reinstall_pkg) elif pkg_verify: # No need to wrap this in a try/except because we would already # have caught invalid arguments earlier. 
verify_result = __salt__['pkg.verify'](reinstall_pkg, ignore_types=ignore_types, verify_options=verify_options, **kwargs) if verify_result: failed.append(reinstall_pkg) altered_files[reinstall_pkg] = verify_result else: modified.append(reinstall_pkg) if modified: # Add a comment for each package in modified with its pkg.verify output for modified_pkg in modified: if sources: pkgstr = modified_pkg else: pkgstr = _get_desired_pkg(modified_pkg, desired) msg = 'Package {0} was reinstalled.'.format(pkgstr) if modified_pkg in altered_files: msg += ' The following files were remediated:' comment.append(msg) comment.append(_nested_output(altered_files[modified_pkg])) else: comment.append(msg) if failed: # Add a comment for each package in failed with its pkg.verify output for failed_pkg in failed: if sources: pkgstr = failed_pkg else: pkgstr = _get_desired_pkg(failed_pkg, desired) msg = ('Reinstall was not successful for package {0}.' .format(pkgstr)) if failed_pkg in altered_files: msg += ' The following files could not be remediated:' comment.append(msg) comment.append(_nested_output(altered_files[failed_pkg])) else: comment.append(msg) result = False ret = {'name': name, 'changes': changes, 'result': result, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret def downloaded(name, version=None, pkgs=None, fromrepo=None, ignore_epoch=None, **kwargs): ''' .. versionadded:: 2017.7.0 Ensure that the package is downloaded, and that it is the correct version (if specified). Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` :param str name: The name of the package to be downloaded. This parameter is ignored if either "pkgs" is used. Additionally, please note that this option can only be used to download packages from a software repository. :param str version: Download a specific version of a package. .. 
important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.downloaded: - version: 2:7.4.160-1.el7 An **ignore_epoch** argument has been added to which causes the epoch to be disregarded when the state checks to see if the desired version was installed. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.downloaded: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: yaml zsh: pkg.downloaded: - version: 5.0.5-4.63 - fromrepo: "myrepository" ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} if 'pkg.list_downloaded' not in __salt__: ret['result'] = False ret['comment'] = 'The pkg.downloaded state is not available on ' \ 'this platform' return ret if not pkgs and isinstance(pkgs, list): ret['result'] = True ret['comment'] = 'No packages to download provided' return ret # If just a name (and optionally a version) is passed, just pack them into # the pkgs argument. if name and not pkgs: if version: pkgs = [{name: version}] version = None else: pkgs = [name] # It doesn't make sense here to received 'downloadonly' as kwargs # as we're explicitly passing 'downloadonly=True' to execution module. 
if 'downloadonly' in kwargs: del kwargs['downloadonly'] pkgs, _refresh = _resolve_capabilities(pkgs, **kwargs) # Only downloading not yet downloaded packages targets = _find_download_targets(name, version, pkgs, fromrepo=fromrepo, ignore_epoch=ignore_epoch, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, dict): ret['result'] = False ret['comment'] = 'An error was encountered while checking targets: ' \ '{0}'.format(targets) return ret if __opts__['test']: summary = ', '.join(targets) ret['comment'] = 'The following packages would be ' \ 'downloaded: {0}'.format(summary) return ret try: pkg_ret = __salt__['pkg.install'](name=name, pkgs=pkgs, version=version, downloadonly=True, fromrepo=fromrepo, ignore_epoch=ignore_epoch, **kwargs) ret['result'] = True ret['changes'].update(pkg_ret) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = 'An error was encountered while downloading ' \ 'package(s): {0}'.format(exc) return ret new_pkgs = __salt__['pkg.list_downloaded'](**kwargs) ok, failed = _verify_install(targets, new_pkgs, ignore_epoch=ignore_epoch) if failed: summary = ', '.join([_get_desired_pkg(x, targets) for x in failed]) ret['result'] = False ret['comment'] = 'The following packages failed to ' \ 'download: {0}'.format(summary) if not ret['changes'] and not ret['comment']: ret['result'] = True ret['comment'] = 'Packages are already downloaded: ' \ '{0}'.format(', '.join(targets)) return ret def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs): ''' .. versionadded:: 2017.7.0 Ensure that packages related to certain advisory ids are installed. 
Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` CLI Example: .. code-block:: yaml issue-foo-fixed: pkg.patch_installed: - advisory_ids: - SUSE-SLE-SERVER-12-SP2-2017-185 - SUSE-SLE-SERVER-12-SP2-2017-150 - SUSE-SLE-SERVER-12-SP2-2017-120 ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} if 'pkg.list_patches' not in __salt__: ret['result'] = False ret['comment'] = 'The pkg.patch_installed state is not available on ' \ 'this platform' return ret if not advisory_ids and isinstance(advisory_ids, list): ret['result'] = True ret['comment'] = 'No advisory ids provided' return ret # Only downloading not yet downloaded packages targets = _find_advisory_targets(name, advisory_ids, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, list): ret['result'] = False ret['comment'] = 'An error was encountered while checking targets: ' \ '{0}'.format(targets) return ret if __opts__['test']: summary = ', '.join(targets) ret['comment'] = 'The following advisory patches would be ' \ 'downloaded: {0}'.format(summary) return ret try: pkg_ret = __salt__['pkg.install'](name=name, advisory_ids=advisory_ids, downloadonly=downloadonly, **kwargs) ret['result'] = True ret['changes'].update(pkg_ret) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while downloading ' 'package(s): {0}'.format(exc)) return ret if not ret['changes'] and not ret['comment']: status = 'downloaded' if downloadonly else 'installed' ret['result'] = True ret['comment'] = ('Advisory patch is not needed or related packages ' 'are already {0}'.format(status)) return ret def patch_downloaded(name, advisory_ids=None, **kwargs): ''' .. versionadded:: 2017.7.0 Ensure that packages related to certain advisory ids are downloaded. Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` CLI Example: .. code-block:: yaml preparing-to-fix-issues: pkg.patch_downloaded: - advisory_ids: - SUSE-SLE-SERVER-12-SP2-2017-185 - SUSE-SLE-SERVER-12-SP2-2017-150 - SUSE-SLE-SERVER-12-SP2-2017-120 ''' if 'pkg.list_patches' not in __salt__: return {'name': name, 'result': False, 'changes': {}, 'comment': 'The pkg.patch_downloaded state is not available on ' 'this platform'} # It doesn't make sense here to received 'downloadonly' as kwargs # as we're explicitly passing 'downloadonly=True' to execution module. if 'downloadonly' in kwargs: del kwargs['downloadonly'] return patch_installed(name=name, advisory_ids=advisory_ids, downloadonly=True, **kwargs) def latest( name, refresh=None, fromrepo=None, skip_verify=False, pkgs=None, watch_flags=True, **kwargs): ''' Ensure that the named package is installed and the latest available package. If the package can be updated, this state function will update the package. Generally it is better for the :mod:`installed <salt.states.pkg.installed>` function to be used, as :mod:`latest <salt.states.pkg.latest>` will update the package whenever a new package is available. name The name of the package to maintain at the latest available version. This parameter is ignored if "pkgs" is used. 
fromrepo Specify a repository from which to install skip_verify Skip the GPG verification check for the package to be installed refresh This parameter controls whether or not the package repo database is updated prior to checking for the latest available version of the requested packages. If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before checking for the latest available version of the requested packages. If ``False``, the package database will *not* be refreshed before checking. If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.latest: - refresh: True - cache_valid_time: 300 In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 Multiple Package Installation Options: (Not yet supported for: FreeBSD, OpenBSD, MacOS, and Solaris pkgutil) pkgs A list of packages to maintain at the latest available version. .. 
code-block:: yaml mypkgs: pkg.latest: - pkgs: - foo - bar - baz install_recommends Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - install_recommends: False only_upgrade Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. report_reboot_exit_codes If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. code-block:: yaml ms vcpp installed: pkg.latest: - name: ms-vcpp - report_reboot_exit_codes: False ''' refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if kwargs.get('sources'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'The "sources" parameter is not supported.'} elif pkgs: desired_pkgs = list(_repack_pkgs(pkgs).keys()) if not desired_pkgs: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted "pkgs" parameter. See ' 'minion log.'} else: if not pkgs and isinstance(pkgs, list): return { 'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided' } else: desired_pkgs = [name] kwargs['saltenv'] = __env__ # check if capabilities should be checked and modify the requested packages # accordingly. 
desired_pkgs, refresh = _resolve_capabilities(desired_pkgs, refresh=refresh, **kwargs) try: avail = __salt__['pkg.latest_version'](*desired_pkgs, fromrepo=fromrepo, refresh=refresh, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking the ' 'newest available version of package(s): {0}' .format(exc)} try: cur = __salt__['pkg.version'](*desired_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': exc.strerror} # Repack the cur/avail data if only a single package is being checked if isinstance(cur, six.string_types): cur = {desired_pkgs[0]: cur} if isinstance(avail, six.string_types): avail = {desired_pkgs[0]: avail} targets = {} problems = [] for pkg in desired_pkgs: if not avail.get(pkg): # Package either a) is up-to-date, or b) does not exist if not cur.get(pkg): # Package does not exist msg = 'No information found for \'{0}\'.'.format(pkg) log.error(msg) problems.append(msg) elif watch_flags \ and __grains__.get('os_family') == 'Gentoo' \ and __salt__['portage_config.is_changed_uses'](pkg): # Package is up-to-date, but Gentoo USE flags are changing so # we need to add it to the targets targets[pkg] = cur[pkg] else: # Package either a) is not installed, or b) is installed and has an # upgrade available targets[pkg] = avail[pkg] if problems: return { 'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems) } if targets: # Find up-to-date packages if not pkgs: # There couldn't have been any up-to-date packages if this state # only targeted a single package and is being allowed to proceed to # the install step. 
up_to_date = [] else: up_to_date = [x for x in pkgs if x not in targets] if __opts__['test']: comments = [] comments.append( 'The following packages would be installed/upgraded: ' + ', '.join(sorted(targets)) ) if up_to_date: up_to_date_count = len(up_to_date) if up_to_date_count <= 10: comments.append( 'The following packages are already up-to-date: ' + ', '.join( ['{0} ({1})'.format(x, cur[x]) for x in sorted(up_to_date)] ) ) else: comments.append( '{0} packages are already up-to-date' .format(up_to_date_count) ) return {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comments)} if salt.utils.platform.is_windows(): # pkg.install execution module on windows ensures the software # package is installed when no version is specified, it does not # upgrade the software to the latest. This is per the design. # Build updated list of pkgs *with verion number*, exclude # non-targeted ones targeted_pkgs = [{x: targets[x]} for x in targets] else: # Build updated list of pkgs to exclude non-targeted ones targeted_pkgs = list(targets) # No need to refresh, if a refresh was necessary it would have been # performed above when pkg.latest_version was run. 
try: changes = __salt__['pkg.install'](name=None, refresh=False, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=targeted_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while installing ' 'package(s): {0}'.format(exc)} if changes: # Find failed and successful updates failed = [x for x in targets if not changes.get(x) or changes[x].get('new') != targets[x] and targets[x] != 'latest'] successful = [x for x in targets if x not in failed] comments = [] if failed: msg = 'The following packages failed to update: ' \ '{0}'.format(', '.join(sorted(failed))) comments.append(msg) if successful: msg = 'The following packages were successfully ' \ 'installed/upgraded: ' \ '{0}'.format(', '.join(sorted(successful))) comments.append(msg) if up_to_date: if len(up_to_date) <= 10: msg = 'The following packages were already up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: msg = '{0} packages were already up-to-date '.format( len(up_to_date)) comments.append(msg) return {'name': name, 'changes': changes, 'result': False if failed else True, 'comment': ' '.join(comments)} else: if len(targets) > 10: comment = ('{0} targeted packages failed to update. ' 'See debug log for details.'.format(len(targets))) elif len(targets) > 1: comment = ('The following targeted packages failed to update. ' 'See debug log for details: ({0}).' 
.format(', '.join(sorted(targets)))) else: comment = 'Package {0} failed to ' \ 'update.'.format(next(iter(list(targets.keys())))) if up_to_date: if len(up_to_date) <= 10: comment += ' The following packages were already ' \ 'up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: comment += '{0} packages were already ' \ 'up-to-date'.format(len(up_to_date)) return {'name': name, 'changes': changes, 'result': False, 'comment': comment} else: if len(desired_pkgs) > 10: comment = 'All {0} packages are up-to-date.'.format( len(desired_pkgs)) elif len(desired_pkgs) > 1: comment = 'All packages are up-to-date ' \ '({0}).'.format(', '.join(sorted(desired_pkgs))) else: comment = 'Package {0} is already ' \ 'up-to-date'.format(desired_pkgs[0]) return {'name': name, 'changes': {}, 'result': True, 'comment': comment} def _uninstall( action='remove', name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Common function for package removal ''' if action not in ('remove', 'purge'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalid action \'{0}\'. 
' 'This is probably a bug.'.format(action)} try: pkg_params = __salt__['pkg_resource.parse_targets']( name, pkgs, normalize=normalize)[0] except MinionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while parsing targets: ' '{0}'.format(exc)} targets = _find_remove_targets(name, version, pkgs, normalize, ignore_epoch=ignore_epoch, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, list): return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking targets: ' '{0}'.format(targets)} if action == 'purge': old_removed = __salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) targets.extend([x for x in pkg_params if x in old_removed]) targets.sort() if not targets: return {'name': name, 'changes': {}, 'result': True, 'comment': 'None of the targeted packages are installed' '{0}'.format(' or partially installed' if action == 'purge' else '')} if __opts__['test']: return {'name': name, 'changes': {}, 'result': None, 'comment': 'The following packages will be {0}d: ' '{1}.'.format(action, ', '.join(targets))} changes = __salt__['pkg.{0}'.format(action)](name, pkgs=pkgs, version=version, **kwargs) new = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) failed = [] for x in pkg_params: if __grains__['os_family'] in ['Suse', 'RedHat']: # Check if the package version set to be removed is actually removed: if x in new and not pkg_params[x]: failed.append(x) elif x in new and pkg_params[x] in new[x]: failed.append(x + "-" + pkg_params[x]) elif x in new: failed.append(x) if action == 'purge': new_removed = __salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) failed.extend([x for x in pkg_params if x in new_removed]) failed.sort() if failed: return {'name': name, 'changes': changes, 'result': False, 'comment': 'The following packages failed to {0}: ' '{1}.'.format(action, ', 
'.join(failed))} comments = [] not_installed = sorted([x for x in pkg_params if x not in targets]) if not_installed: comments.append('The following packages were not installed: ' '{0}'.format(', '.join(not_installed))) comments.append('The following packages were {0}d: ' '{1}.'.format(action, ', '.join(targets))) else: comments.append('All targeted packages were {0}d.'.format(action)) return {'name': name, 'changes': changes, 'result': True, 'comment': ' '.join(comments)} def removed(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.remove`` if necessary to remove the package. name The name of the package to be removed. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.removed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to remove. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='remove', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while removing ' 'package(s): {0}'.format(exc)) return ret def purged(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.purge`` if necessary to purge the package. 
All configuration files are also removed. name The name of the package to be purged. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. 
code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.purged: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to purge. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='purge', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while purging ' 'package(s): {0}'.format(exc)) return ret def uptodate(name, refresh=False, pkgs=None, **kwargs): ''' .. versionadded:: 2014.7.0 .. versionchanged:: 2018.3.0 Added support for the ``pkgin`` provider. Verify that the system is completely up to date. name The name has no functional value and is only used as a tracking reference refresh refresh the package database before checking for new upgrades pkgs list of packages to upgrade :param str cache_valid_time: This parameter sets the value in seconds after which cache marked as invalid, and cache update is necessary. This overwrite ``refresh`` parameter default behavior. In this case cache_valid_time is set, refresh will not take place for amount in seconds since last ``apt-get update`` executed on the system. .. 
note:: This parameter available only on Debian based distributions, and have no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 kwargs Any keyword arguments to pass through to ``pkg.upgrade``. .. versionadded:: 2015.5.0 ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'Failed to update'} if 'pkg.list_upgrades' not in __salt__: ret['comment'] = 'State pkg.uptodate is not available' return ret # emerge --update doesn't appear to support repo notation if 'fromrepo' in kwargs and __grains__['os_family'] == 'Gentoo': ret['comment'] = '\'fromrepo\' argument not supported on this platform' return ret if isinstance(refresh, bool): pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) try: packages = __salt__['pkg.list_upgrades'](refresh=refresh, **kwargs) expected = {pkgname: {'new': pkgver, 'old': __salt__['pkg.version'](pkgname, **kwargs)} for pkgname, pkgver in six.iteritems(packages)} if isinstance(pkgs, list): packages = [pkg for pkg in packages if pkg in pkgs] expected = {pkgname: pkgver for pkgname, pkgver in six.iteritems(expected) if pkgname in pkgs} except Exception as exc: ret['comment'] = six.text_type(exc) return ret else: ret['comment'] = 'refresh must be either True or False' return ret if not packages: ret['comment'] = 'System is already up-to-date' ret['result'] = True return ret elif __opts__['test']: ret['comment'] = 'System update will be performed' ret['changes'] = expected ret['result'] = None return ret try: ret['changes'] = __salt__['pkg.upgrade'](refresh=refresh, pkgs=pkgs, **kwargs) except CommandExecutionError as exc: if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while updating ' 'packages: {0}'.format(exc)) return ret # If a package list was provided, ensure those packages were updated missing = [] if isinstance(pkgs, list): missing = [pkg for pkg in six.iterkeys(expected) if pkg not in ret['changes']] if missing: ret['comment'] = 'The following package(s) failed to update: {0}'.format(', '.join(missing)) ret['result'] = False else: ret['comment'] = 'Upgrade ran successfully' ret['result'] = True return ret def group_installed(name, skip=None, include=None, **kwargs): ''' .. versionadded:: 2015.8.0 .. versionchanged:: 2016.11.0 Added support in :mod:`pacman <salt.modules.pacman>` Ensure that an entire package group is installed. This state is currently only supported for the :mod:`yum <salt.modules.yumpkg>` and :mod:`pacman <salt.modules.pacman>` package managers. skip Packages that would normally be installed by the package group ("default" packages), which should not be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - skip: - piranha include Packages which are included in a group, which would not normally be installed by a ``yum groupinstall`` ("optional" packages). Note that this will not enforce group membership; if you include packages which are not members of the specified groups, they will still be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - include: - haproxy .. versionchanged:: 2016.3.0 This option can no longer be passed as a comma-separated list, it must now be passed as a list (as shown in the above example). .. note:: Because this is essentially a wrapper around :py:func:`pkg.install <salt.modules.yumpkg.install>`, any argument which can be passed to pkg.install may also be included here, and it will be passed on to the call to :py:func:`pkg.install <salt.modules.yumpkg.install>`. 
''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} if 'pkg.group_diff' not in __salt__: ret['comment'] = 'pkg.group_install not available for this platform' return ret if skip is None: skip = [] else: if not isinstance(skip, list): ret['comment'] = 'skip must be formatted as a list' return ret for idx, item in enumerate(skip): if not isinstance(item, six.string_types): skip[idx] = six.text_type(item) if include is None: include = [] else: if not isinstance(include, list): ret['comment'] = 'include must be formatted as a list' return ret for idx, item in enumerate(include): if not isinstance(item, six.string_types): include[idx] = six.text_type(item) try: diff = __salt__['pkg.group_diff'](name) except CommandExecutionError as err: ret['comment'] = ('An error was encountered while installing/updating ' 'group \'{0}\': {1}.'.format(name, err)) return ret mandatory = diff['mandatory']['installed'] + \ diff['mandatory']['not installed'] invalid_skip = [x for x in mandatory if x in skip] if invalid_skip: ret['comment'] = ( 'The following mandatory packages cannot be skipped: {0}' .format(', '.join(invalid_skip)) ) return ret targets = diff['mandatory']['not installed'] targets.extend([x for x in diff['default']['not installed'] if x not in skip]) targets.extend(include) if not targets: ret['result'] = True ret['comment'] = 'Group \'{0}\' is already installed'.format(name) return ret partially_installed = diff['mandatory']['installed'] \ or diff['default']['installed'] \ or diff['optional']['installed'] if __opts__['test']: ret['result'] = None if partially_installed: ret['comment'] = ( 'Group \'{0}\' is partially installed and will be updated' .format(name) ) else: ret['comment'] = 'Group \'{0}\' will be installed'.format(name) return ret try: ret['changes'] = __salt__['pkg.install'](pkgs=targets, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while ' 'installing/updating group \'{0}\': {1}' .format(name, exc)) return ret failed = [x for x in targets if x not in __salt__['pkg.list_pkgs'](**kwargs)] if failed: ret['comment'] = ( 'Failed to install the following packages: {0}' .format(', '.join(failed)) ) return ret ret['result'] = True ret['comment'] = 'Group \'{0}\' was {1}'.format( name, 'updated' if partially_installed else 'installed' ) return ret def mod_aggregate(low, chunks, running): ''' The mod_aggregate function which looks up all packages in the available low chunks and merges them into a single pkgs ref in the present low data ''' pkgs = [] pkg_type = None agg_enabled = [ 'installed', 'latest', 'removed', 'purged', ] if low.get('fun') not in agg_enabled: return low for chunk in chunks: tag = __utils__['state.gen_tag'](chunk) if tag in running: # Already ran the pkg state, skip aggregation continue if chunk.get('state') == 'pkg': if '__agg__' in chunk: continue # Check for the same function if chunk.get('fun') != low.get('fun'): continue # Check for the same repo if chunk.get('fromrepo') != low.get('fromrepo'): continue # Check first if 'sources' was passed so we don't aggregate pkgs # and sources together. if 'sources' in chunk: if pkg_type is None: pkg_type = 'sources' if pkg_type == 'sources': pkgs.extend(chunk['sources']) chunk['__agg__'] = True else: if pkg_type is None: pkg_type = 'pkgs' if pkg_type == 'pkgs': # Pull out the pkg names! 
if 'pkgs' in chunk: pkgs.extend(chunk['pkgs']) chunk['__agg__'] = True elif 'name' in chunk: version = chunk.pop('version', None) if version is not None: pkgs.append({chunk['name']: version}) else: pkgs.append(chunk['name']) chunk['__agg__'] = True if pkg_type is not None and pkgs: if pkg_type in low: low[pkg_type].extend(pkgs) else: low[pkg_type] = pkgs return low def mod_watch(name, **kwargs): ''' Install/reinstall a package based on a watch requisite .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered. ''' sfun = kwargs.pop('sfun', None) mapfun = {'purged': purged, 'latest': latest, 'removed': removed, 'installed': installed} if sfun in mapfun: return mapfun[sfun](name, **kwargs) return {'name': name, 'changes': {}, 'comment': 'pkg.{0} does not work with the watch requisite'.format(sfun), 'result': False}
saltstack/salt
salt/states/pkg.py
mod_watch
python
def mod_watch(name, **kwargs):
    '''
    Install/reinstall a package based on a watch requisite

    .. note::
        This state exists to support special handling of the ``watch``
        :ref:`requisite <requisites>`. It should not be called directly.

        Parameters for this function should be set by the state being triggered.
    '''
    # The triggering state injects which pkg state function to re-run via
    # the special 'sfun' kwarg; strip it before forwarding the rest.
    sfun = kwargs.pop('sfun', None)
    dispatch = {
        'purged': purged,
        'latest': latest,
        'removed': removed,
        'installed': installed,
    }
    handler = dispatch.get(sfun)
    if handler is not None:
        # Delegate to the matching pkg state with the original arguments.
        return handler(name, **kwargs)
    # Any other state function cannot be driven by a watch requisite.
    return {
        'name': name,
        'changes': {},
        'comment': 'pkg.{0} does not work with the watch requisite'.format(sfun),
        'result': False,
    }
Install/reinstall a package based on a watch requisite .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/pkg.py#L3329-L3349
null
# -*- coding: utf-8 -*- ''' Installation of packages using OS package managers such as yum or apt-get ========================================================================= .. note:: On minions running systemd>=205, as of version 2015.8.12, 2016.3.3, and 2016.11.0, `systemd-run(1)`_ is now used to isolate commands which modify installed packages from the ``salt-minion`` daemon's control group. This is done to keep systemd from killing the package manager commands spawned by Salt, when Salt updates itself (see ``KillMode`` in the `systemd.kill(5)`_ manpage for more information). If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.use_scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html Salt can manage software packages via the pkg state module, packages can be set up to be installed, latest, removed and purged. Package management declarations are typically rather simple: .. code-block:: yaml vim: pkg.installed A more involved example involves pulling from a custom repository. .. code-block:: yaml base: pkgrepo.managed: - humanname: Logstash PPA - name: ppa:wolfnet/logstash - dist: precise - file: /etc/apt/sources.list.d/logstash.list - keyid: 28B04E4A - keyserver: keyserver.ubuntu.com logstash: pkg.installed: - fromrepo: ppa:wolfnet/logstash Multiple packages can also be installed with the use of the pkgs state module .. code-block:: yaml dotdeb.repo: pkgrepo.managed: - humanname: Dotdeb - name: deb http://packages.dotdeb.org wheezy-php55 all - dist: wheezy-php55 - file: /etc/apt/sources.list.d/dotbeb.list - keyid: 89DF5277 - keyserver: keys.gnupg.net - refresh_db: true php.packages: pkg.installed: - fromrepo: wheezy-php55 - pkgs: - php5-fpm - php5-cli - php5-curl .. 
warning:: Package names are currently case-sensitive. If the minion is using a package manager which is not case-sensitive (such as :mod:`pkgng <salt.modules.pkgng>`), then this state will fail if the proper case is not used. This will be addressed in a future release of Salt. ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import fnmatch import logging import os import re # Import Salt libs import salt.utils.pkg import salt.utils.platform import salt.utils.versions from salt.output import nested from salt.utils.functools import namespaced_function as _namespaced_function from salt.utils.odict import OrderedDict as _OrderedDict from salt.exceptions import ( CommandExecutionError, MinionError, SaltInvocationError ) from salt.modules.pkg_resource import _repack_pkgs # Import 3rd-party libs from salt.ext import six # pylint: disable=invalid-name _repack_pkgs = _namespaced_function(_repack_pkgs, globals()) if salt.utils.platform.is_windows(): # pylint: disable=import-error,no-name-in-module,unused-import from salt.ext.six.moves.urllib.parse import urlparse as _urlparse from salt.exceptions import SaltRenderError import collections import datetime import errno import time from functools import cmp_to_key # pylint: disable=import-error # pylint: enable=unused-import from salt.modules.win_pkg import _get_package_info from salt.modules.win_pkg import get_repo_data from salt.modules.win_pkg import _get_repo_details from salt.modules.win_pkg import _refresh_db_conditional from salt.modules.win_pkg import refresh_db from salt.modules.win_pkg import genrepo from salt.modules.win_pkg import _repo_process_pkg_sls from salt.modules.win_pkg import _get_latest_pkg_version from salt.modules.win_pkg import _reverse_cmp_pkg_versions _get_package_info = _namespaced_function(_get_package_info, globals()) get_repo_data = _namespaced_function(get_repo_data, globals()) _get_repo_details = \ _namespaced_function(_get_repo_details, globals()) 
_refresh_db_conditional = \ _namespaced_function(_refresh_db_conditional, globals()) refresh_db = _namespaced_function(refresh_db, globals()) genrepo = _namespaced_function(genrepo, globals()) _repo_process_pkg_sls = \ _namespaced_function(_repo_process_pkg_sls, globals()) _get_latest_pkg_version = \ _namespaced_function(_get_latest_pkg_version, globals()) _reverse_cmp_pkg_versions = \ _namespaced_function(_reverse_cmp_pkg_versions, globals()) # The following imports are used by the namespaced win_pkg funcs # and need to be included in their globals. # pylint: disable=import-error,unused-import from salt.utils.versions import LooseVersion # pylint: enable=import-error,unused-import # pylint: enable=invalid-name log = logging.getLogger(__name__) def __virtual__(): ''' Only make these states available if a pkg provider has been detected or assigned for this minion ''' return 'pkg.install' in __salt__ def _get_comparison_spec(pkgver): ''' Return a tuple containing the comparison operator and the version. If no comparison operator was passed, the comparison is assumed to be an "equals" comparison, and "==" will be the operator returned. ''' oper, verstr = salt.utils.pkg.split_comparison(pkgver.strip()) if oper in ('=', ''): oper = '==' return oper, verstr def _parse_version_string(version_conditions_string): ''' Returns a list of two-tuples containing (operator, version). ''' result = [] version_conditions_string = version_conditions_string.strip() if not version_conditions_string: return result for version_condition in version_conditions_string.split(','): operator_and_version = _get_comparison_spec(version_condition) result.append(operator_and_version) return result def _fulfills_version_string(installed_versions, version_conditions_string, ignore_epoch=False, allow_updates=False): ''' Returns True if any of the installed versions match the specified version conditions, otherwise returns False. 
installed_versions The installed versions version_conditions_string The string containing all version conditions. E.G. 1.2.3-4 >=1.2.3-4 >=1.2.3-4, <2.3.4-5 >=1.2.3-4, <2.3.4-5, !=1.2.4-1 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. allow_updates : False Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. (Only applicable if only one strict version condition is specified E.G. version: 2.0.6~ubuntu3) ''' version_conditions = _parse_version_string(version_conditions_string) for installed_version in installed_versions: fullfills_all = True for operator, version_string in version_conditions: if allow_updates and len(version_conditions) == 1 and operator == '==': operator = '>=' fullfills_all = fullfills_all and _fulfills_version_spec([installed_version], operator, version_string, ignore_epoch=ignore_epoch) if fullfills_all: return True return False def _fulfills_version_spec(versions, oper, desired_version, ignore_epoch=False): ''' Returns True if any of the installed versions match the specified version, otherwise returns False ''' cmp_func = __salt__.get('pkg.version_cmp') # stripping "with_origin" dict wrapper if salt.utils.platform.is_freebsd(): if isinstance(versions, dict) and 'version' in versions: versions = versions['version'] for ver in versions: if (oper == '==' and fnmatch.fnmatch(ver, desired_version)) \ or salt.utils.versions.compare(ver1=ver, oper=oper, ver2=desired_version, cmp_func=cmp_func, ignore_epoch=ignore_epoch): return True return False def _find_unpurge_targets(desired, **kwargs): ''' Find packages which are marked to be purged but can't yet be removed because they are dependencies for 
other installed packages. These are the packages which will need to be 'unpurged' because they are part of pkg.installed states. This really just applies to Debian-based Linuxes. ''' return [ x for x in desired if x in __salt__['pkg.list_pkgs'](purge_desired=True, **kwargs) ] def _find_download_targets(name=None, version=None, pkgs=None, normalize=True, skip_suggestions=False, ignore_epoch=False, **kwargs): ''' Inspect the arguments to pkg.downloaded and discover what packages need to be downloaded. Return a dict of packages to download. ''' cur_pkgs = __salt__['pkg.list_downloaded'](**kwargs) if pkgs: to_download = _repack_pkgs(pkgs, normalize=normalize) if not to_download: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted pkgs parameter. See ' 'minion log.'} else: if normalize: _normalize_name = \ __salt__.get('pkg.normalize_name', lambda pkgname: pkgname) to_download = {_normalize_name(name): version} else: to_download = {name: version} cver = cur_pkgs.get(name, {}) if name in to_download: # Package already downloaded, no need to download again if cver and version in cver: return {'name': name, 'changes': {}, 'result': True, 'comment': 'Version {0} of package \'{1}\' is already ' 'downloaded'.format(version, name)} # if cver is not an empty string, the package is already downloaded elif cver and version is None: # The package is downloaded return {'name': name, 'changes': {}, 'result': True, 'comment': 'Package {0} is already ' 'downloaded'.format(name)} version_spec = False if not skip_suggestions: try: problems = _preflight_check(to_download, **kwargs) except CommandExecutionError: pass else: comments = [] if problems.get('no_suggest'): comments.append( 'The following package(s) were not found, and no ' 'possible matches were found in the package db: ' '{0}'.format( ', '.join(sorted(problems['no_suggest'])) ) ) if problems.get('suggest'): for pkgname, suggestions in \ six.iteritems(problems['suggest']): 
                        # One comment per unknown package, listing the
                        # alternatives suggested by the package db.
                        comments.append(
                            'Package \'{0}\' not found (possible matches: '
                            '{1})'.format(pkgname, ', '.join(suggestions))
                        )
                if comments:
                    if len(comments) > 1:
                        comments.append('')
                    # Pre-flight problems are fatal for this state run:
                    # report them instead of attempting the download.
                    return {'name': name,
                            'changes': {},
                            'result': False,
                            'comment': '. '.join(comments).rstrip()}

    # Find out which packages will be targeted in the call to pkg.download
    # Check current downloaded versions against specified versions
    targets = {}
    problems = []
    for pkgname, pkgver in six.iteritems(to_download):
        cver = cur_pkgs.get(pkgname, {})
        # Package not yet downloaded, so add to targets
        if not cver:
            targets[pkgname] = pkgver
            continue
        # No version specified but package is already downloaded
        elif cver and not pkgver:
            continue

        version_spec = True
        try:
            # Only download again if no already-downloaded version satisfies
            # the requested version expression.
            if not _fulfills_version_string(cver.keys(), pkgver,
                                            ignore_epoch=ignore_epoch):
                targets[pkgname] = pkgver
        except CommandExecutionError as exc:
            problems.append(exc.strerror)
            continue

    if problems:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': ' '.join(problems)}

    if not targets:
        # All specified packages are already downloaded
        msg = (
            'All specified packages{0} are already downloaded'
            .format(' (matching specified versions)' if version_spec else '')
        )
        return {'name': name,
                'changes': {},
                'result': True,
                'comment': msg}

    return targets


def _find_advisory_targets(name=None,
                           advisory_ids=None,
                           **kwargs):
    '''
    Inspect the arguments to pkg.patch_installed and discover what advisory
    patches need to be installed. Return a dict of advisory patches to install.

    name
        Single advisory patch id, used only when ``advisory_ids`` is not
        given.

    advisory_ids
        Optional list of advisory patch ids; takes precedence over ``name``.

    Returns either a list of advisory ids that still need installing, or a
    complete state-style result dict when there is nothing to do.
    '''
    cur_patches = __salt__['pkg.list_installed_patches'](**kwargs)
    if advisory_ids:
        to_download = advisory_ids
    else:
        to_download = [name]
        if cur_patches.get(name, {}):
            # Advisory patch already installed, no need to install it again
            return {'name': name,
                    'changes': {},
                    'result': True,
                    'comment': 'Advisory patch {0} is already '
                               'installed'.format(name)}

    # Find out which advisory patches will be targeted in the call to pkg.install
    targets = []
    for patch_name in to_download:
        cver = cur_patches.get(patch_name, {})
        # Advisory patch not yet installed, so add to targets
        if not cver:
            targets.append(patch_name)
            continue

    if not targets:
        # All specified packages are already downloaded
        msg = ('All specified advisory patches are already installed')
        return {'name': name,
                'changes': {},
                'result': True,
                'comment': msg}

    return targets


def _find_remove_targets(name=None,
                         version=None,
                         pkgs=None,
                         normalize=True,
                         ignore_epoch=False,
                         **kwargs):
    '''
    Inspect the arguments to pkg.removed and discover what packages need to
    be removed. Return a dict of packages to remove.

    Either ``pkgs`` (a list, possibly with per-package version strings) or
    the single ``name``/``version`` pair is consulted. Returns a list of
    package names to remove, or a state-style result dict when there is
    nothing to do or the input was malformed.
    '''
    if __grains__['os'] == 'FreeBSD':
        kwargs['with_origin'] = True
    cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
    if pkgs:
        to_remove = _repack_pkgs(pkgs, normalize=normalize)

        if not to_remove:
            # Badly-formatted SLS
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'Invalidly formatted pkgs parameter. See '
                               'minion log.'}
    else:
        # Fall back to identity when the provider has no normalize_name
        _normalize_name = \
            __salt__.get('pkg.normalize_name', lambda pkgname: pkgname)
        to_remove = {_normalize_name(name): version}

    version_spec = False
    # Find out which packages will be targeted in the call to pkg.remove
    # Check current versions against specified versions
    targets = []
    problems = []
    for pkgname, pkgver in six.iteritems(to_remove):
        # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names
        origin = bool(re.search('/', pkgname))

        if __grains__['os'] == 'FreeBSD' and origin:
            cver = [k for k, v in six.iteritems(cur_pkgs)
                    if v['origin'] == pkgname]
        else:
            cver = cur_pkgs.get(pkgname, [])

        # Package not installed, no need to remove
        if not cver:
            continue
        # No version specified and pkg is installed
        elif __salt__['pkg_resource.version_clean'](pkgver) is None:
            targets.append(pkgname)
            continue
        version_spec = True
        try:
            # Remove only if an installed version matches the requested
            # version expression.
            if _fulfills_version_string(cver, pkgver,
                                        ignore_epoch=ignore_epoch):
                targets.append(pkgname)
            else:
                log.debug(
                    'Current version (%s) did not match desired version '
                    'specification (%s), will not remove', cver, pkgver
                )
        except CommandExecutionError as exc:
            problems.append(exc.strerror)
            continue

    if problems:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': ' '.join(problems)}

    if not targets:
        # All specified packages are already absent
        msg = 'All specified packages{0} are already absent'.format(
            ' (matching specified versions)' if version_spec else ''
        )
        return {'name': name,
                'changes': {},
                'result': True,
                'comment': msg}

    return targets


def _find_install_targets(name=None,
                          version=None,
                          pkgs=None,
                          sources=None,
                          skip_suggestions=False,
                          pkg_verify=False,
                          normalize=True,
                          ignore_epoch=False,
                          reinstall=False,
                          refresh=False,
                          **kwargs):
    '''
    Inspect the arguments to pkg.installed and discover what packages need to
    be installed.
    Return a dict of desired packages
    '''
    # Tracks whether a repo refresh already happened while discovering
    # targets, so the caller can skip a redundant refresh.
    was_refreshed = False

    if all((pkgs, sources)):
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': 'Only one of "pkgs" and "sources" is permitted.'}

    # dict for packages that fail pkg.verify and their altered files
    altered_files = {}
    # Get the ignore_types list if any from the pkg_verify argument
    if isinstance(pkg_verify, list) \
            and any(x.get('ignore_types') is not None
                    for x in pkg_verify
                    if isinstance(x, _OrderedDict)
                    and 'ignore_types' in x):
        ignore_types = next(x.get('ignore_types')
                            for x in pkg_verify
                            if 'ignore_types' in x)
    else:
        ignore_types = []

    # Get the verify_options list if any from the pkg_verify argument
    if isinstance(pkg_verify, list) \
            and any(x.get('verify_options') is not None
                    for x in pkg_verify
                    if isinstance(x, _OrderedDict)
                    and 'verify_options' in x):
        verify_options = next(x.get('verify_options')
                              for x in pkg_verify
                              if 'verify_options' in x)
    else:
        verify_options = []

    if __grains__['os'] == 'FreeBSD':
        kwargs['with_origin'] = True

    if salt.utils.platform.is_windows():
        # Windows requires a refresh to establish a pkg db if refresh=True, so
        # add it to the kwargs.
        kwargs['refresh'] = refresh

    # Capability resolution is only attempted when requested AND the provider
    # implements pkg.list_provides.
    resolve_capabilities = kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__
    try:
        cur_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs)
        cur_prov = resolve_capabilities and __salt__['pkg.list_provides'](**kwargs) or dict()
    except CommandExecutionError as exc:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': exc.strerror}

    if salt.utils.platform.is_windows() and kwargs.pop('refresh', False):
        # We already refreshed when we called pkg.list_pkgs
        was_refreshed = True
        refresh = False

    if any((pkgs, sources)):
        if pkgs:
            desired = _repack_pkgs(pkgs, normalize=normalize)
        elif sources:
            desired = __salt__['pkg_resource.pack_sources'](
                sources,
                normalize=normalize,
            )

        if not desired:
            # Badly-formatted SLS
            return {'name': name,
                    'changes': {},
                    'result': False,
                    'comment': 'Invalidly formatted \'{0}\' parameter. See '
                               'minion log.'.format('pkgs' if pkgs
                                                    else 'sources')}
        to_unpurge = _find_unpurge_targets(desired, **kwargs)
    else:
        if salt.utils.platform.is_windows():
            # Resolve the version from the Windows software repository when
            # one was not given explicitly.
            pkginfo = _get_package_info(name, saltenv=kwargs['saltenv'])
            if not pkginfo:
                return {'name': name,
                        'changes': {},
                        'result': False,
                        'comment': 'Package {0} not found in the '
                                   'repository.'.format(name)}
            if version is None:
                version = _get_latest_pkg_version(pkginfo)

        if normalize:
            # Fall back to identity when the provider has no normalize_name
            _normalize_name = \
                __salt__.get('pkg.normalize_name', lambda pkgname: pkgname)
            desired = {_normalize_name(name): version}
        else:
            desired = {name: version}

        to_unpurge = _find_unpurge_targets(desired, **kwargs)

        # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names
        origin = bool(re.search('/', name))

        if __grains__['os'] == 'FreeBSD' and origin:
            cver = [k for k, v in six.iteritems(cur_pkgs)
                    if v['origin'] == name]
        else:
            cver = cur_pkgs.get(name, [])

        if name not in to_unpurge:
            if version and version in cver \
                    and not reinstall \
                    and not pkg_verify:
                # The package is installed and is the correct version
                return {'name': name,
                        'changes': {},
                        'result': True,
                        'comment': 'Version {0} of package \'{1}\' is already '
                                   'installed'.format(version, name)}

            # if cver is not an empty string, the package is already installed
            elif cver and version is None \
                    and not reinstall \
                    and not pkg_verify:
                # The package is installed
                return {'name': name,
                        'changes': {},
                        'result': True,
                        'comment': 'Package {0} is already '
                                   'installed'.format(name)}

    version_spec = False
    if not sources:
        # Check for alternate package names if strict processing is not
        # enforced. Takes extra time. Disable for improved performance
        if not skip_suggestions:
            # Perform platform-specific pre-flight checks
            not_installed = dict([
                (name, version)
                for name, version in desired.items()
                if not (name in cur_pkgs and
                        (version is None or
                         _fulfills_version_string(cur_pkgs[name], version)))
            ])
            if not_installed:
                try:
                    problems = _preflight_check(not_installed, **kwargs)
                except CommandExecutionError:
                    # Best-effort check: a failure here must not abort the
                    # state run.
                    pass
                else:
                    comments = []
                    if problems.get('no_suggest'):
                        comments.append(
                            'The following package(s) were not found, and no '
                            'possible matches were found in the package db: '
                            '{0}'.format(
                                ', '.join(sorted(problems['no_suggest']))
                            )
                        )
                    if problems.get('suggest'):
                        for pkgname, suggestions in \
                                six.iteritems(problems['suggest']):
                            comments.append(
                                'Package \'{0}\' not found (possible matches: '
                                '{1})'.format(pkgname, ', '.join(suggestions))
                            )
                    if comments:
                        if len(comments) > 1:
                            comments.append('')
                        return {'name': name,
                                'changes': {},
                                'result': False,
                                'comment': '. '.join(comments).rstrip()}

    # Resolve the latest package version for any packages with "latest" in the
    # package version
    wants_latest = [] \
        if sources \
        else [x for x, y in six.iteritems(desired) if y == 'latest']
    if wants_latest:
        resolved_latest = __salt__['pkg.latest_version'](*wants_latest,
                                                         refresh=refresh,
                                                         **kwargs)
        if len(wants_latest) == 1:
            # A single-package query returns a bare string; normalize to the
            # dict shape used below.
            resolved_latest = {wants_latest[0]: resolved_latest}
        if refresh:
            was_refreshed = True
            refresh = False

        # pkg.latest_version returns an empty string when the package is
        # up-to-date. So check the currently-installed packages. If found, the
        # resolved latest version will be the currently installed one from
        # cur_pkgs. If not found, then the package doesn't exist and the
        # resolved latest version will be None.
        for key in resolved_latest:
            if not resolved_latest[key]:
                if key in cur_pkgs:
                    resolved_latest[key] = cur_pkgs[key][-1]
                else:
                    resolved_latest[key] = None
        # Update the desired versions with the ones we resolved
        desired.update(resolved_latest)

    # Find out which packages will be targeted in the call to pkg.install
    targets = {}
    to_reinstall = {}
    problems = []
    warnings = []
    failed_verify = False
    for package_name, version_string in six.iteritems(desired):
        cver = cur_pkgs.get(package_name, [])
        if resolve_capabilities and not cver and package_name in cur_prov:
            # Not installed under its own name; fall back to the package
            # providing this capability.
            cver = cur_pkgs.get(cur_prov.get(package_name)[0], [])

        # Package not yet installed, so add to targets
        if not cver:
            targets[package_name] = version_string
            continue

        if sources:
            if reinstall:
                to_reinstall[package_name] = version_string
                continue
            elif 'lowpkg.bin_pkg_info' not in __salt__:
                continue
            # Metadata parser is available, cache the file and derive the
            # package's name and version
            err = 'Unable to cache {0}: {1}'
            try:
                cached_path = __salt__['cp.cache_file'](
                    version_string, saltenv=kwargs['saltenv'])
            except CommandExecutionError as exc:
                problems.append(err.format(version_string, exc))
                continue
            if not cached_path:
                problems.append(err.format(version_string, 'file not found'))
                continue
            elif not os.path.exists(cached_path):
                problems.append('{0} does not exist on minion'.format(
                    version_string))
                continue
            source_info = __salt__['lowpkg.bin_pkg_info'](cached_path)
            if source_info is None:
                warnings.append('Failed to parse metadata for {0}'.format(
                    version_string))
                continue
            else:
                verstr = source_info['version']
        else:
            verstr = version_string
            if reinstall:
                to_reinstall[package_name] = version_string
                continue
            if not __salt__['pkg_resource.check_extra_requirements'](
                    package_name, version_string):
                targets[package_name] = version_string
                continue

            # No version specified and pkg is installed
            elif __salt__['pkg_resource.version_clean'](version_string) is None:
                if (not reinstall) and pkg_verify:
                    try:
                        verify_result = __salt__['pkg.verify'](
                            package_name,
                            ignore_types=ignore_types,
                            verify_options=verify_options,
                            **kwargs
                        )
                    except (CommandExecutionError, SaltInvocationError) as exc:
                        failed_verify = exc.strerror
                        continue
                    if verify_result:
                        # Files altered on disk; schedule a reinstall and
                        # remember what changed for the state changes dict.
                        to_reinstall[package_name] = version_string
                        altered_files[package_name] = verify_result
                continue

        version_fulfilled = False
        allow_updates = bool(not sources and kwargs.get('allow_updates'))
        try:
            version_fulfilled = _fulfills_version_string(
                cver,
                verstr,
                ignore_epoch=ignore_epoch,
                allow_updates=allow_updates)
        except CommandExecutionError as exc:
            problems.append(exc.strerror)
            continue

        # Compare desired version against installed version.
        version_spec = True
        if not version_fulfilled:
            if reinstall:
                to_reinstall[package_name] = version_string
            else:
                version_conditions = _parse_version_string(version_string)
                # pkg.verify only makes sense for exact-version pins
                if pkg_verify and any(oper == '==' for oper, version
                                      in version_conditions):
                    try:
                        verify_result = __salt__['pkg.verify'](
                            package_name,
                            ignore_types=ignore_types,
                            verify_options=verify_options,
                            **kwargs)
                    except (CommandExecutionError, SaltInvocationError) as exc:
                        failed_verify = exc.strerror
                        continue
                    if verify_result:
                        to_reinstall[package_name] = version_string
                        altered_files[package_name] = verify_result
                else:
                    log.debug(
                        'Current version (%s) did not match desired version '
                        'specification (%s), adding to installation targets',
                        cver, version_string
                    )
                    targets[package_name] = version_string

    if failed_verify:
        problems.append(failed_verify)

    if problems:
        return {'name': name,
                'changes': {},
                'result': False,
                'comment': ' '.join(problems)}

    if not any((targets, to_unpurge, to_reinstall)):
        # All specified packages are installed
        msg = 'All specified packages are already installed{0}'
        msg = msg.format(
            ' and are at the desired version' if
            version_spec and not sources else ''
        )
        ret = {'name': name,
               'changes': {},
               'result': True,
               'comment': msg}
        if warnings:
            ret.setdefault('warnings', []).extend(warnings)
        return ret

    # Non-dict return signals the caller that installation work remains.
    return (desired, targets, to_unpurge, to_reinstall, altered_files,
            warnings, was_refreshed)


def _verify_install(desired, new_pkgs, ignore_epoch=False, new_caps=None):
    '''
    Determine whether or not the installed packages match what was requested in
    the SLS file.

    desired
        Mapping of package name -> requested version (or None/'latest').

    new_pkgs
        Post-install output of pkg.list_pkgs, consulted per platform naming
        convention (FreeBSD origins, Homebrew taps, OpenBSD flavors, Debian
        ``name=version`` pins).

    new_caps
        Optional capability -> providing-package mapping used as a fallback
        lookup.

    Returns a two-tuple of (ok, failed) package-name lists.
    '''
    ok = []
    failed = []
    if not new_caps:
        new_caps = dict()
    for pkgname, pkgver in desired.items():
        # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names.
        # Homebrew for Mac OSX does something similar with tap names
        # prefixing package names, separated with a slash.
        has_origin = '/' in pkgname

        if __grains__['os'] == 'FreeBSD' and has_origin:
            cver = [k for k, v in six.iteritems(new_pkgs)
                    if v['origin'] == pkgname]
        elif __grains__['os'] == 'MacOS' and has_origin:
            cver = new_pkgs.get(pkgname, new_pkgs.get(pkgname.split('/')[-1]))
        elif __grains__['os'] == 'OpenBSD':
            cver = new_pkgs.get(pkgname.split('%')[0])
        elif __grains__['os_family'] == 'Debian':
            cver = new_pkgs.get(pkgname.split('=')[0])
        else:
            cver = new_pkgs.get(pkgname)

        if not cver and pkgname in new_caps:
            # Fall back to the package that provides this capability
            cver = new_pkgs.get(new_caps.get(pkgname)[0])

        if not cver:
            failed.append(pkgname)
            continue
        elif pkgver == 'latest':
            ok.append(pkgname)
            continue
        elif not __salt__['pkg_resource.version_clean'](pkgver):
            # No version requested; presence alone is success
            ok.append(pkgname)
            continue
        elif pkgver.endswith("*") and cver[0].startswith(pkgver[:-1]):
            # Wildcard version matched by prefix
            ok.append(pkgname)
            continue
        if _fulfills_version_string(cver, pkgver, ignore_epoch=ignore_epoch):
            ok.append(pkgname)
        else:
            failed.append(pkgname)
    return ok, failed


def _get_desired_pkg(name, desired):
    '''
    Helper function that retrieves and nicely formats the desired pkg (and
    version if specified) so that helpful information can be printed in the
    comment for the state.
    '''
    # Only add '=' when a plain version (no comparison operator) was given
    if not desired[name] or desired[name].startswith(('<', '>', '=')):
        oper = ''
    else:
        oper = '='
    return '{0}{1}{2}'.format(name, oper,
                              '' if not desired[name] else desired[name])


def _preflight_check(desired, fromrepo, **kwargs):
    '''
    Perform platform-specific checks on desired packages

    Returns a dict with two keys: ``suggest`` (pkgname -> list of suggested
    alternatives) and ``no_suggest`` (pkgnames with no match at all). An
    empty dict is returned when the provider has no pkg.check_db.
    '''
    if 'pkg.check_db' not in __salt__:
        return {}
    ret = {'suggest': {}, 'no_suggest': []}
    pkginfo = __salt__['pkg.check_db'](
        *list(desired.keys()), fromrepo=fromrepo, **kwargs
    )
    for pkgname in pkginfo:
        if pkginfo[pkgname]['found'] is False:
            if pkginfo[pkgname]['suggestions']:
                ret['suggest'][pkgname] = pkginfo[pkgname]['suggestions']
            else:
                ret['no_suggest'].append(pkgname)
    return ret


def _nested_output(obj):
    '''
    Serialize obj and format for output
    '''
    # The nested outputter reads configuration from its module-level __opts__
    nested.__opts__ = __opts__
    ret = nested.output(obj).rstrip()
    return ret


def _resolve_capabilities(pkgs, refresh=False, **kwargs):
    '''
    Resolve capabilities in ``pkgs`` and exchange them with real package
    names, when the result is distinct.
    This feature can be turned on while setting the parameter
    ``resolve_capabilities`` to True.

    Return the input dictionary with replaced capability names and as second
    return value a bool which says if a refresh needs to be run.

    In case of ``resolve_capabilities`` is False (disabled) or not supported
    by the implementation the input is returned unchanged.
    '''
    if not pkgs or 'pkg.resolve_capabilities' not in __salt__:
        return pkgs, refresh

    # The provider call consumes the refresh; report False so the caller
    # does not refresh again.
    ret = __salt__['pkg.resolve_capabilities'](pkgs, refresh=refresh, **kwargs)
    return ret, False


def installed(
        name,
        version=None,
        refresh=None,
        fromrepo=None,
        skip_verify=False,
        skip_suggestions=False,
        pkgs=None,
        sources=None,
        allow_updates=False,
        pkg_verify=False,
        normalize=True,
        ignore_epoch=False,
        reinstall=False,
        update_holds=False,
        bypass_file=None,
        bypass_file_contains=None,
        **kwargs):
    '''
    Ensure that the package is installed, and that it is the correct version
    (if specified).

    :param str name:
        The name of the package to be installed.
This parameter is ignored if either "pkgs" or "sources" is used. Additionally, please note that this option can only be used to install packages from a software repository. To install a package file manually, use the "sources" option detailed below. :param str version: Install a specific version of a package. This option is ignored if "sources" is used. Currently, this option is supported for the following pkg providers: :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`pkgin <salt.modules.pkgin>`, :mod:`win_pkg <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`. The version number includes the release designation where applicable, to allow Salt to target a specific release of a given version. When in doubt, using the ``pkg.latest_version`` function for an uninstalled package will tell you the version available. .. code-block:: bash # salt myminion pkg.latest_version vim-enhanced myminion: 2:7.4.160-1.el7 .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the ``pkg.latest_version`` output above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. 
Also, while this function is not yet implemented for all pkg frontends, :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` will show all versions available in the various repositories for a given package, irrespective of whether or not it is installed. .. code-block:: bash # salt myminion pkg.list_repo_pkgs bash myminion: ---------- bash: - 4.2.46-21.el7_3 - 4.2.46-20.el7_2 This function was first added for :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` in 2014.1.0, and was expanded to :py:func:`Debian/Ubuntu <salt.modules.aptpkg.list_repo_pkgs>` and :py:func:`Arch Linux <salt.modules.pacman.list_repo_pkgs>`-based distros in the 2017.7.0 release. The version strings returned by either of these functions can be used as version specifiers in pkg states. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.installed: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 If the version given is the string ``latest``, the latest available package version will be installed à la ``pkg.latest``. **WILDCARD VERSIONS** As of the 2017.7.0 release, this state now supports wildcards in package versions for SUSE SLES/Leap/Tumbleweed, Debian/Ubuntu, RHEL/CentOS, Arch Linux, and their derivatives. Using wildcards can be useful for packages where the release name is built into the version in some way, such as for RHEL/CentOS which typically has version numbers like ``1.2.34-5.el7``. An example of the usage for this would be: .. code-block:: yaml mypkg: pkg.installed: - version: '1.2.34*' Keep in mind that using wildcard versions will result in a slower state run since Salt must gather the available versions of the specified packages and figure out which of them match the specified wildcard expression. :param bool refresh: This parameter controls whether or not the package repo database is updated prior to installing the requested package(s). 
If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before installing. If ``False``, the package database will *not* be refreshed before installing. If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - cache_valid_time: 300 - allow_updates: True - hold: False In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param str fromrepo: Specify a repository from which to install .. note:: Distros which use APT (Debian, Ubuntu, etc.) do not have a concept of repositories, in the same way as YUM-based distros do. When a source is added, it is assigned to a given release. Consider the following source configuration: .. code-block:: text deb http://ppa.launchpad.net/saltstack/salt/ubuntu precise main The packages provided by this source would be made available via the ``precise`` release, therefore ``fromrepo`` would need to be set to ``precise`` for Salt to install the package from this source. 
Having multiple sources in the same release may result in the default install candidate being newer than what is desired. If this is the case, the desired version must be specified using the ``version`` parameter. If the ``pkgs`` parameter is being used to install multiple packages in the same state, then instead of using ``version``, use the method of version specification described in the **Multiple Package Installation Options** section below. Running the shell command ``apt-cache policy pkgname`` on a minion can help elucidate the APT configuration and aid in properly configuring states: .. code-block:: bash root@saltmaster:~# salt ubuntu01 cmd.run 'apt-cache policy ffmpeg' ubuntu01: ffmpeg: Installed: (none) Candidate: 7:0.10.11-1~precise1 Version table: 7:0.10.11-1~precise1 0 500 http://ppa.launchpad.net/jon-severinsson/ffmpeg/ubuntu/ precise/main amd64 Packages 4:0.8.10-0ubuntu0.12.04.1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise-updates/main amd64 Packages 500 http://security.ubuntu.com/ubuntu/ precise-security/main amd64 Packages 4:0.8.1-0ubuntu1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise/main amd64 Packages The release is located directly after the source's URL. The actual release name is the part before the slash, so to install version **4:0.8.10-0ubuntu0.12.04.1** either ``precise-updates`` or ``precise-security`` could be used for the ``fromrepo`` value. :param bool skip_verify: Skip the GPG verification check for the package to be installed :param bool skip_suggestions: Force strict package naming. Disables lookup of package alternatives. .. versionadded:: 2014.1.1 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 :param bool allow_updates: Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). 
This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - allow_updates: True - hold: False :param bool pkg_verify: .. versionadded:: 2014.7.0 For requested packages that are already installed and would not be targeted for upgrade or downgrade, use pkg.verify to determine if any of the files installed by the package have been altered. If files have been altered, the reinstall option of pkg.install is used to force a reinstall. Types to ignore can be passed to pkg.verify. Additionally, ``verify_options`` can be used to modify further the behavior of pkg.verify. See examples below. Currently, this option is supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>`. Examples: .. code-block:: yaml httpd: pkg.installed: - version: 2.2.15-30.el6.centos - pkg_verify: True .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: - config - doc - verify_options: - nodeps - nofiledigest :param list ignore_types: List of types to ignore when verifying the package .. versionadded:: 2014.7.0 :param list verify_options: List of additional options to pass when verifying the package. These options will be added to the ``rpm -V`` command, prepended with ``--`` (for example, when ``nodeps`` is passed in this option, ``rpm -V`` will be run with ``--nodeps``). .. versionadded:: 2016.11.0 :param bool normalize: Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml gpfs.gplbin-2.6.32-279.31.1.el6.x86_64: pkg.installed: - normalize: False :param bool ignore_epoch: When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.installed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the package would be installed, but the state would report as failed because the actual installed version would be ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 | **MULTIPLE PACKAGE INSTALLATION OPTIONS: (not supported in pkgng)** :param list pkgs: A list of packages to install from a software repository. All packages listed under ``pkgs`` will be installed via a single command. .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar - baz - hold: True ``NOTE:`` For :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`winrepo <salt.modules.win_pkg>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`, version numbers can be specified in the ``pkgs`` argument. For example: .. 
code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz Additionally, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`zypper <salt.modules.zypper>`, :mod:`yum/dnf <salt.modules.yumpkg>`, and :mod:`apt <salt.modules.aptpkg>` support the ``<``, ``<=``, ``>=``, and ``>`` operators for more control over what versions will be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: '>=1.2.3-4' - baz ``NOTE:`` When using comparison operators, the expression must be enclosed in quotes to avoid a YAML render error. With :mod:`ebuild <salt.modules.ebuild>` is also possible to specify a use flag list and/or if the given packages should be in package.accept_keywords file and/or the overlay from which you want the package to be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo: '~' - bar: '~>=1.2:slot::overlay[use,-otheruse]' - baz :param list sources: A list of packages to install, along with the source URI or local path from which to install each package. In the example below, ``foo``, ``bar``, ``baz``, etc. refer to the name of the package, as it would appear in the output of the ``pkg.version`` or ``pkg.list_pkgs`` salt CLI commands. .. code-block:: yaml mypkgs: pkg.installed: - sources: - foo: salt://rpms/foo.rpm - bar: http://somesite.org/bar.rpm - baz: ftp://someothersite.org/baz.rpm - qux: /minion/path/to/qux.rpm **PLATFORM-SPECIFIC ARGUMENTS** These are specific to each OS. If it does not apply to the execution module for your OS, it is ignored. :param bool hold: Force the package to be held at the current installed version. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2014.7.0 :param bool update_holds: If ``True``, and this function would update the package version, any packages which are being held will be temporarily unheld so that they can be updated. 
Otherwise, if this function attempts to update a held package, the held package(s) will be skipped and the state will fail. By default, this parameter is set to ``False``. Currently works with YUM/DNF & APT based systems. .. versionadded:: 2016.11.0 :param list names: A list of packages to install from a software repository. Each package will be installed individually by the package manager. .. warning:: Unlike ``pkgs``, the ``names`` parameter cannot specify a version. In addition, it makes a separate call to the package management frontend to install each package, whereas ``pkgs`` makes just a single call. It is therefore recommended to use ``pkgs`` instead of ``names`` to install multiple packages, both for the additional features and the performance improvement that it brings. :param bool install_recommends: Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - install_recommends: False :param bool only_upgrade: Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. :param bool report_reboot_exit_codes: If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. 
code-block:: yaml

        ms vcpp installed:
          pkg.installed:
            - name: ms-vcpp
            - version: 10.0.40219
            - report_reboot_exit_codes: False

    :param str bypass_file:
        If you wish to bypass the full package validation process, you can
        specify a file related to the installed package as a way to validate
        the package has already been installed. A good example would be a
        config file that is deployed with the package. Another bypass_file
        could be ``/run/salt-minion.pid``.

        .. code-block:: yaml

            install_ntp:
              pkg.installed:
                - name: ntp
                - bypass_file: /etc/ntp.conf

        The use case for this feature is when running salt at significant
        scale. Each state that has a requisite for a ``pkg.installed`` will
        have salt querying the package manager of the system. Compared to
        simple diff checks, querying the package manager is a lengthy
        process. This feature is an attempt to reduce the run time of
        states. If only a config change is being made but you wish to keep
        all of the self resolving requisites this bypasses the lengthy cost
        of the package manager. The assumption is that if this file is
        present, the package should already be installed.

    :param str bypass_file_contains:
        This option can only be used in conjunction with the ``bypass_file``
        option. It is to provide a second layer of validation before
        bypassing the ``pkg.installed`` process.

        .. code-block:: yaml

            install_ntp:
              pkg.installed:
                - name: ntp
                - bypass_file: /etc/ntp.conf
                - bypass_file_contains: version-20181218

        This will have salt check to see if the file contains the specified
        string. If the value is found, the ``pkg.installed`` process will be
        bypassed under the assumption that two pieces of validation have
        passed and the package is already installed.

        .. warning::
            Do not try and use ``{{ salt['pkg.version']('ntp') }}`` in a
            jinja template as part of your bypass_file_contains match. This
            will trigger a ``pkg.version`` lookup with the package manager
            and negate any time saved by trying to use the bypass feature.
:return: A dictionary containing the state of the software installation :rtype dict: .. note:: The ``pkg.installed`` state supports the usage of ``reload_modules``. This functionality allows you to force Salt to reload all modules. In many cases, Salt is clever enough to transparently reload the modules. For example, if you install a package, Salt reloads modules because some other module or state might require the package which was installed. However, there are some edge cases where this may not be the case, which is what ``reload_modules`` is meant to resolve. You should only use ``reload_modules`` if your ``pkg.installed`` does some sort of installation where if you do not reload the modules future items in your state which rely on the software being installed will fail. Please see the :ref:`Reloading Modules <reloading-modules>` documentation for more information. ''' if not pkgs and isinstance(pkgs, list): return {'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided'} # If just a name (and optionally a version) is passed, just pack them into # the pkgs argument. 
if name and not any((pkgs, sources)): if version: pkgs = [{name: version}] version = None else: pkgs = [name] kwargs['saltenv'] = __env__ refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if bypass_file is not None and bypass_file_contains is not None: if os.path.isfile(bypass_file): with salt.utils.fopen(bypass_file) as bypass_file_open: if bypass_file_contains in bypass_file_open.read(): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as {} was present in {}'.format(bypass_file_contains, bypass_file)} if bypass_file is not None and bypass_file_contains is None: if os.path.isfile(bypass_file): return {'name': name, 'changes': {}, 'result': True, 'comment': 'pkg.installed was bypassed as bypass_file {} was present'.format(bypass_file)} # check if capabilities should be checked and modify the requested packages # accordingly. if pkgs: pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) if not isinstance(pkg_verify, list): pkg_verify = pkg_verify is True if (pkg_verify or isinstance(pkg_verify, list)) \ and 'pkg.verify' not in __salt__: return {'name': name, 'changes': {}, 'result': False, 'comment': 'pkg.verify not implemented'} if not isinstance(version, six.string_types) and version is not None: version = six.text_type(version) kwargs['allow_updates'] = allow_updates result = _find_install_targets(name, version, pkgs, sources, fromrepo=fromrepo, skip_suggestions=skip_suggestions, pkg_verify=pkg_verify, normalize=normalize, ignore_epoch=ignore_epoch, reinstall=reinstall, refresh=refresh, **kwargs) try: (desired, targets, to_unpurge, to_reinstall, altered_files, warnings, was_refreshed) = result if was_refreshed: refresh = False except ValueError: # _find_install_targets() found no targets or encountered an error # check that the hold function is available if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( 
name=name, pkgs=pkgs, sources=sources ) except (CommandExecutionError, SaltInvocationError) as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': six.text_type(exc)} if 'result' in hold_ret and not hold_ret['result']: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] for i in modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] result['changes'][i['name']] = i['changes'] for i in not_modified_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] for i in failed_hold: result['comment'] += '.\n{0}'.format(i['comment']) result['result'] = i['result'] return result if to_unpurge and 'lowpkg.unpurge' not in __salt__: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'lowpkg.unpurge not implemented'} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret # Remove any targets not returned by _find_install_targets if pkgs: pkgs = [dict([(x, y)]) for x, y in six.iteritems(targets)] pkgs.extend([dict([(x, y)]) for x, y in six.iteritems(to_reinstall)]) elif sources: oldsources = sources sources = [x for x in oldsources if next(iter(list(x.keys()))) in targets] sources.extend([x for x in oldsources if next(iter(list(x.keys()))) in to_reinstall]) comment = [] if __opts__['test']: if targets: if sources: summary = ', '.join(targets) else: summary = ', '.join([_get_desired_pkg(x, targets) for x in targets]) comment.append('The following packages would be ' 'installed/updated: {0}'.format(summary)) if to_unpurge: comment.append( 'The following packages would have their selection 
status ' 'changed from \'purge\' to \'install\': {0}' .format(', '.join(to_unpurge)) ) if to_reinstall: # Add a comment for each package in to_reinstall with its # pkg.verify output if reinstall: reinstall_targets = [] for reinstall_pkg in to_reinstall: if sources: reinstall_targets.append(reinstall_pkg) else: reinstall_targets.append( _get_desired_pkg(reinstall_pkg, to_reinstall) ) msg = 'The following packages would be reinstalled: ' msg += ', '.join(reinstall_targets) comment.append(msg) else: for reinstall_pkg in to_reinstall: if sources: pkgstr = reinstall_pkg else: pkgstr = _get_desired_pkg(reinstall_pkg, to_reinstall) comment.append( 'Package \'{0}\' would be reinstalled because the ' 'following files have been altered:'.format(pkgstr) ) comment.append( _nested_output(altered_files[reinstall_pkg]) ) ret = {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret changes = {'installed': {}} modified_hold = None not_modified_hold = None failed_hold = None if targets or to_reinstall: try: pkg_ret = __salt__['pkg.install'](name=None, refresh=refresh, version=version, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=pkgs, sources=sources, reinstall=bool(to_reinstall), normalize=normalize, update_holds=update_holds, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while installing ' 'package(s): {0}'.format(exc)) if warnings: ret.setdefault('warnings', []).extend(warnings) return ret if refresh: refresh = False if isinstance(pkg_ret, dict): changes['installed'].update(pkg_ret) elif isinstance(pkg_ret, six.string_types): comment.append(pkg_ret) # Code below will be looking for a dictionary. If this is a string # it means that there was an exception raised and that no packages # changed, so now that we have added this error to the comments we # set this to an empty dictionary so that the code below which # checks reinstall targets works. pkg_ret = {} if 'pkg.hold' in __salt__ and 'hold' in kwargs: try: action = 'pkg.hold' if kwargs['hold'] else 'pkg.unhold' hold_ret = __salt__[action]( name=name, pkgs=desired ) except (CommandExecutionError, SaltInvocationError) as exc: comment.append(six.text_type(exc)) ret = {'name': name, 'changes': changes, 'result': False, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: if 'result' in hold_ret and not hold_ret['result']: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] if to_unpurge: changes['purge_desired'] = __salt__['lowpkg.unpurge'](*to_unpurge) # Analyze pkg.install results for packages in targets if sources: modified = [x for x in changes['installed'] if x in targets] not_modified = [x for x in desired if x not in targets 
and x not in to_reinstall] failed = [x for x in targets if x not in modified] else: if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True new_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) if kwargs.get('resolve_capabilities', False) and 'pkg.list_provides' in __salt__: new_caps = __salt__['pkg.list_provides'](**kwargs) else: new_caps = {} ok, failed = _verify_install(desired, new_pkgs, ignore_epoch=ignore_epoch, new_caps=new_caps) modified = [x for x in ok if x in targets] not_modified = [x for x in ok if x not in targets and x not in to_reinstall] failed = [x for x in failed if x in targets] # If there was nothing unpurged, just set the changes dict to the contents # of changes['installed']. if not changes.get('purge_desired'): changes = changes['installed'] if modified: if sources: summary = ', '.join(modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in modified]) if len(summary) < 20: comment.append('The following packages were installed/updated: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} installed/updated.'.format( len(modified), 's' if len(modified) > 1 else '', 'were' if len(modified) > 1 else 'was' ) ) if modified_hold: for i in modified_hold: change_name = i['name'] if change_name in changes: comment.append(i['comment']) if changes[change_name]['new']: changes[change_name]['new'] += '\n' changes[change_name]['new'] += '{0}'.format(i['changes']['new']) if changes[change_name]['old']: changes[change_name]['old'] += '\n' changes[change_name]['old'] += '{0}'.format(i['changes']['old']) else: comment.append(i['comment']) changes[change_name] = {} changes[change_name]['new'] = '{0}'.format(i['changes']['new']) # Any requested packages that were not targeted for install or reinstall if not_modified: if sources: summary = ', '.join(not_modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in not_modified]) if len(not_modified) <= 20: comment.append('The following 
packages were already installed: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} already installed'.format( len(not_modified), 's' if len(not_modified) > 1 else '', 'were' if len(not_modified) > 1 else 'was' ) ) if not_modified_hold: for i in not_modified_hold: comment.append(i['comment']) result = True if failed: if sources: summary = ', '.join(failed) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in failed]) comment.insert(0, 'The following packages failed to ' 'install/update: {0}'.format(summary)) result = False if failed_hold: for i in failed_hold: comment.append(i['comment']) result = False # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Get the verify_options list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('verify_options') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'verify_options' in x): verify_options = next(x.get('verify_options') for x in pkg_verify if 'verify_options' in x) else: verify_options = [] # Rerun pkg.verify for packages in to_reinstall to determine failed modified = [] failed = [] for reinstall_pkg in to_reinstall: if reinstall: if reinstall_pkg in pkg_ret: modified.append(reinstall_pkg) else: failed.append(reinstall_pkg) elif pkg_verify: # No need to wrap this in a try/except because we would already # have caught invalid arguments earlier. 
verify_result = __salt__['pkg.verify'](reinstall_pkg, ignore_types=ignore_types, verify_options=verify_options, **kwargs) if verify_result: failed.append(reinstall_pkg) altered_files[reinstall_pkg] = verify_result else: modified.append(reinstall_pkg) if modified: # Add a comment for each package in modified with its pkg.verify output for modified_pkg in modified: if sources: pkgstr = modified_pkg else: pkgstr = _get_desired_pkg(modified_pkg, desired) msg = 'Package {0} was reinstalled.'.format(pkgstr) if modified_pkg in altered_files: msg += ' The following files were remediated:' comment.append(msg) comment.append(_nested_output(altered_files[modified_pkg])) else: comment.append(msg) if failed: # Add a comment for each package in failed with its pkg.verify output for failed_pkg in failed: if sources: pkgstr = failed_pkg else: pkgstr = _get_desired_pkg(failed_pkg, desired) msg = ('Reinstall was not successful for package {0}.' .format(pkgstr)) if failed_pkg in altered_files: msg += ' The following files could not be remediated:' comment.append(msg) comment.append(_nested_output(altered_files[failed_pkg])) else: comment.append(msg) result = False ret = {'name': name, 'changes': changes, 'result': result, 'comment': '\n'.join(comment)} if warnings: ret.setdefault('warnings', []).extend(warnings) return ret def downloaded(name, version=None, pkgs=None, fromrepo=None, ignore_epoch=None, **kwargs): ''' .. versionadded:: 2017.7.0 Ensure that the package is downloaded, and that it is the correct version (if specified). Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` :param str name: The name of the package to be downloaded. This parameter is ignored if either "pkgs" is used. Additionally, please note that this option can only be used to download packages from a software repository. :param str version: Download a specific version of a package. .. 
important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.downloaded: - version: 2:7.4.160-1.el7 An **ignore_epoch** argument has been added to which causes the epoch to be disregarded when the state checks to see if the desired version was installed. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.downloaded: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 CLI Example: .. code-block:: yaml zsh: pkg.downloaded: - version: 5.0.5-4.63 - fromrepo: "myrepository" ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} if 'pkg.list_downloaded' not in __salt__: ret['result'] = False ret['comment'] = 'The pkg.downloaded state is not available on ' \ 'this platform' return ret if not pkgs and isinstance(pkgs, list): ret['result'] = True ret['comment'] = 'No packages to download provided' return ret # If just a name (and optionally a version) is passed, just pack them into # the pkgs argument. if name and not pkgs: if version: pkgs = [{name: version}] version = None else: pkgs = [name] # It doesn't make sense here to received 'downloadonly' as kwargs # as we're explicitly passing 'downloadonly=True' to execution module. 
if 'downloadonly' in kwargs: del kwargs['downloadonly'] pkgs, _refresh = _resolve_capabilities(pkgs, **kwargs) # Only downloading not yet downloaded packages targets = _find_download_targets(name, version, pkgs, fromrepo=fromrepo, ignore_epoch=ignore_epoch, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, dict): ret['result'] = False ret['comment'] = 'An error was encountered while checking targets: ' \ '{0}'.format(targets) return ret if __opts__['test']: summary = ', '.join(targets) ret['comment'] = 'The following packages would be ' \ 'downloaded: {0}'.format(summary) return ret try: pkg_ret = __salt__['pkg.install'](name=name, pkgs=pkgs, version=version, downloadonly=True, fromrepo=fromrepo, ignore_epoch=ignore_epoch, **kwargs) ret['result'] = True ret['changes'].update(pkg_ret) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = 'An error was encountered while downloading ' \ 'package(s): {0}'.format(exc) return ret new_pkgs = __salt__['pkg.list_downloaded'](**kwargs) ok, failed = _verify_install(targets, new_pkgs, ignore_epoch=ignore_epoch) if failed: summary = ', '.join([_get_desired_pkg(x, targets) for x in failed]) ret['result'] = False ret['comment'] = 'The following packages failed to ' \ 'download: {0}'.format(summary) if not ret['changes'] and not ret['comment']: ret['result'] = True ret['comment'] = 'Packages are already downloaded: ' \ '{0}'.format(', '.join(targets)) return ret def patch_installed(name, advisory_ids=None, downloadonly=None, **kwargs): ''' .. versionadded:: 2017.7.0 Ensure that packages related to certain advisory ids are installed. 
Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` CLI Example: .. code-block:: yaml issue-foo-fixed: pkg.patch_installed: - advisory_ids: - SUSE-SLE-SERVER-12-SP2-2017-185 - SUSE-SLE-SERVER-12-SP2-2017-150 - SUSE-SLE-SERVER-12-SP2-2017-120 ''' ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''} if 'pkg.list_patches' not in __salt__: ret['result'] = False ret['comment'] = 'The pkg.patch_installed state is not available on ' \ 'this platform' return ret if not advisory_ids and isinstance(advisory_ids, list): ret['result'] = True ret['comment'] = 'No advisory ids provided' return ret # Only downloading not yet downloaded packages targets = _find_advisory_targets(name, advisory_ids, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, list): ret['result'] = False ret['comment'] = 'An error was encountered while checking targets: ' \ '{0}'.format(targets) return ret if __opts__['test']: summary = ', '.join(targets) ret['comment'] = 'The following advisory patches would be ' \ 'downloaded: {0}'.format(summary) return ret try: pkg_ret = __salt__['pkg.install'](name=name, advisory_ids=advisory_ids, downloadonly=downloadonly, **kwargs) ret['result'] = True ret['changes'].update(pkg_ret) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while downloading ' 'package(s): {0}'.format(exc)) return ret if not ret['changes'] and not ret['comment']: status = 'downloaded' if downloadonly else 'installed' ret['result'] = True ret['comment'] = ('Advisory patch is not needed or related packages ' 'are already {0}'.format(status)) return ret def patch_downloaded(name, advisory_ids=None, **kwargs): ''' .. versionadded:: 2017.7.0 Ensure that packages related to certain advisory ids are downloaded. Currently supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>` and :mod:`zypper <salt.modules.zypper>` CLI Example: .. code-block:: yaml preparing-to-fix-issues: pkg.patch_downloaded: - advisory_ids: - SUSE-SLE-SERVER-12-SP2-2017-185 - SUSE-SLE-SERVER-12-SP2-2017-150 - SUSE-SLE-SERVER-12-SP2-2017-120 ''' if 'pkg.list_patches' not in __salt__: return {'name': name, 'result': False, 'changes': {}, 'comment': 'The pkg.patch_downloaded state is not available on ' 'this platform'} # It doesn't make sense here to received 'downloadonly' as kwargs # as we're explicitly passing 'downloadonly=True' to execution module. if 'downloadonly' in kwargs: del kwargs['downloadonly'] return patch_installed(name=name, advisory_ids=advisory_ids, downloadonly=True, **kwargs) def latest( name, refresh=None, fromrepo=None, skip_verify=False, pkgs=None, watch_flags=True, **kwargs): ''' Ensure that the named package is installed and the latest available package. If the package can be updated, this state function will update the package. Generally it is better for the :mod:`installed <salt.states.pkg.installed>` function to be used, as :mod:`latest <salt.states.pkg.latest>` will update the package whenever a new package is available. name The name of the package to maintain at the latest available version. This parameter is ignored if "pkgs" is used. 
fromrepo Specify a repository from which to install skip_verify Skip the GPG verification check for the package to be installed refresh This parameter controls whether or not the package repo database is updated prior to checking for the latest available version of the requested packages. If ``True``, the package database will be refreshed (``apt-get update`` or equivalent, depending on platform) before checking for the latest available version of the requested packages. If ``False``, the package database will *not* be refreshed before checking. If unset, then Salt treats package database refreshes differently depending on whether or not a ``pkg`` state has been executed already during the current Salt run. Once a refresh has been performed in a ``pkg`` state, for the remainder of that Salt run no other refreshes will be performed for ``pkg`` states which do not explicitly set ``refresh`` to ``True``. This prevents needless additional refreshes from slowing down the Salt run. :param str cache_valid_time: .. versionadded:: 2016.11.0 This parameter sets the value in seconds after which the cache is marked as invalid, and a cache update is necessary. This overwrites the ``refresh`` parameter's default behavior. Example: .. code-block:: yaml httpd: pkg.latest: - refresh: True - cache_valid_time: 300 In this case, a refresh will not take place for 5 minutes since the last ``apt-get update`` was executed on the system. .. note:: This parameter is available only on Debian based distributions and has no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 Multiple Package Installation Options: (Not yet supported for: FreeBSD, OpenBSD, MacOS, and Solaris pkgutil) pkgs A list of packages to maintain at the latest available version. .. 
code-block:: yaml mypkgs: pkg.latest: - pkgs: - foo - bar - baz install_recommends Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - install_recommends: False only_upgrade Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.latest: - only_upgrade: True .. note:: If this parameter is set to True and the package is not already installed, the state will fail. report_reboot_exit_codes If the installer exits with a recognized exit code indicating that a reboot is required, the module function *win_system.set_reboot_required_witnessed* will be called, preserving the knowledge of this event for the remainder of the current boot session. For the time being, ``3010`` is the only recognized exit code, but this is subject to future refinement. The value of this param defaults to ``True``. This parameter has no effect on non-Windows systems. .. versionadded:: 2016.11.0 .. code-block:: yaml ms vcpp installed: pkg.latest: - name: ms-vcpp - report_reboot_exit_codes: False ''' refresh = salt.utils.pkg.check_refresh(__opts__, refresh) if kwargs.get('sources'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'The "sources" parameter is not supported.'} elif pkgs: desired_pkgs = list(_repack_pkgs(pkgs).keys()) if not desired_pkgs: # Badly-formatted SLS return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalidly formatted "pkgs" parameter. See ' 'minion log.'} else: if not pkgs and isinstance(pkgs, list): return { 'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided' } else: desired_pkgs = [name] kwargs['saltenv'] = __env__ # check if capabilities should be checked and modify the requested packages # accordingly. 
desired_pkgs, refresh = _resolve_capabilities(desired_pkgs, refresh=refresh, **kwargs) try: avail = __salt__['pkg.latest_version'](*desired_pkgs, fromrepo=fromrepo, refresh=refresh, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking the ' 'newest available version of package(s): {0}' .format(exc)} try: cur = __salt__['pkg.version'](*desired_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': exc.strerror} # Repack the cur/avail data if only a single package is being checked if isinstance(cur, six.string_types): cur = {desired_pkgs[0]: cur} if isinstance(avail, six.string_types): avail = {desired_pkgs[0]: avail} targets = {} problems = [] for pkg in desired_pkgs: if not avail.get(pkg): # Package either a) is up-to-date, or b) does not exist if not cur.get(pkg): # Package does not exist msg = 'No information found for \'{0}\'.'.format(pkg) log.error(msg) problems.append(msg) elif watch_flags \ and __grains__.get('os_family') == 'Gentoo' \ and __salt__['portage_config.is_changed_uses'](pkg): # Package is up-to-date, but Gentoo USE flags are changing so # we need to add it to the targets targets[pkg] = cur[pkg] else: # Package either a) is not installed, or b) is installed and has an # upgrade available targets[pkg] = avail[pkg] if problems: return { 'name': name, 'changes': {}, 'result': False, 'comment': ' '.join(problems) } if targets: # Find up-to-date packages if not pkgs: # There couldn't have been any up-to-date packages if this state # only targeted a single package and is being allowed to proceed to # the install step. 
up_to_date = [] else: up_to_date = [x for x in pkgs if x not in targets] if __opts__['test']: comments = [] comments.append( 'The following packages would be installed/upgraded: ' + ', '.join(sorted(targets)) ) if up_to_date: up_to_date_count = len(up_to_date) if up_to_date_count <= 10: comments.append( 'The following packages are already up-to-date: ' + ', '.join( ['{0} ({1})'.format(x, cur[x]) for x in sorted(up_to_date)] ) ) else: comments.append( '{0} packages are already up-to-date' .format(up_to_date_count) ) return {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comments)} if salt.utils.platform.is_windows(): # pkg.install execution module on windows ensures the software # package is installed when no version is specified, it does not # upgrade the software to the latest. This is per the design. # Build updated list of pkgs *with verion number*, exclude # non-targeted ones targeted_pkgs = [{x: targets[x]} for x in targets] else: # Build updated list of pkgs to exclude non-targeted ones targeted_pkgs = list(targets) # No need to refresh, if a refresh was necessary it would have been # performed above when pkg.latest_version was run. 
try: changes = __salt__['pkg.install'](name=None, refresh=False, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=targeted_pkgs, **kwargs) except CommandExecutionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while installing ' 'package(s): {0}'.format(exc)} if changes: # Find failed and successful updates failed = [x for x in targets if not changes.get(x) or changes[x].get('new') != targets[x] and targets[x] != 'latest'] successful = [x for x in targets if x not in failed] comments = [] if failed: msg = 'The following packages failed to update: ' \ '{0}'.format(', '.join(sorted(failed))) comments.append(msg) if successful: msg = 'The following packages were successfully ' \ 'installed/upgraded: ' \ '{0}'.format(', '.join(sorted(successful))) comments.append(msg) if up_to_date: if len(up_to_date) <= 10: msg = 'The following packages were already up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: msg = '{0} packages were already up-to-date '.format( len(up_to_date)) comments.append(msg) return {'name': name, 'changes': changes, 'result': False if failed else True, 'comment': ' '.join(comments)} else: if len(targets) > 10: comment = ('{0} targeted packages failed to update. ' 'See debug log for details.'.format(len(targets))) elif len(targets) > 1: comment = ('The following targeted packages failed to update. ' 'See debug log for details: ({0}).' 
.format(', '.join(sorted(targets)))) else: comment = 'Package {0} failed to ' \ 'update.'.format(next(iter(list(targets.keys())))) if up_to_date: if len(up_to_date) <= 10: comment += ' The following packages were already ' \ 'up-to-date: ' \ '{0}'.format(', '.join(sorted(up_to_date))) else: comment += '{0} packages were already ' \ 'up-to-date'.format(len(up_to_date)) return {'name': name, 'changes': changes, 'result': False, 'comment': comment} else: if len(desired_pkgs) > 10: comment = 'All {0} packages are up-to-date.'.format( len(desired_pkgs)) elif len(desired_pkgs) > 1: comment = 'All packages are up-to-date ' \ '({0}).'.format(', '.join(sorted(desired_pkgs))) else: comment = 'Package {0} is already ' \ 'up-to-date'.format(desired_pkgs[0]) return {'name': name, 'changes': {}, 'result': True, 'comment': comment} def _uninstall( action='remove', name=None, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Common function for package removal ''' if action not in ('remove', 'purge'): return {'name': name, 'changes': {}, 'result': False, 'comment': 'Invalid action \'{0}\'. 
' 'This is probably a bug.'.format(action)} try: pkg_params = __salt__['pkg_resource.parse_targets']( name, pkgs, normalize=normalize)[0] except MinionError as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while parsing targets: ' '{0}'.format(exc)} targets = _find_remove_targets(name, version, pkgs, normalize, ignore_epoch=ignore_epoch, **kwargs) if isinstance(targets, dict) and 'result' in targets: return targets elif not isinstance(targets, list): return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while checking targets: ' '{0}'.format(targets)} if action == 'purge': old_removed = __salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) targets.extend([x for x in pkg_params if x in old_removed]) targets.sort() if not targets: return {'name': name, 'changes': {}, 'result': True, 'comment': 'None of the targeted packages are installed' '{0}'.format(' or partially installed' if action == 'purge' else '')} if __opts__['test']: return {'name': name, 'changes': {}, 'result': None, 'comment': 'The following packages will be {0}d: ' '{1}.'.format(action, ', '.join(targets))} changes = __salt__['pkg.{0}'.format(action)](name, pkgs=pkgs, version=version, **kwargs) new = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) failed = [] for x in pkg_params: if __grains__['os_family'] in ['Suse', 'RedHat']: # Check if the package version set to be removed is actually removed: if x in new and not pkg_params[x]: failed.append(x) elif x in new and pkg_params[x] in new[x]: failed.append(x + "-" + pkg_params[x]) elif x in new: failed.append(x) if action == 'purge': new_removed = __salt__['pkg.list_pkgs'](versions_as_list=True, removed=True, **kwargs) failed.extend([x for x in pkg_params if x in new_removed]) failed.sort() if failed: return {'name': name, 'changes': changes, 'result': False, 'comment': 'The following packages failed to {0}: ' '{1}.'.format(action, ', 
'.join(failed))} comments = [] not_installed = sorted([x for x in pkg_params if x not in targets]) if not_installed: comments.append('The following packages were not installed: ' '{0}'.format(', '.join(not_installed))) comments.append('The following packages were {0}d: ' '{1}.'.format(action, ', '.join(targets))) else: comments.append('All targeted packages were {0}d.'.format(action)) return {'name': name, 'changes': changes, 'result': True, 'comment': ' '.join(comments)} def removed(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.remove`` if necessary to remove the package. name The name of the package to be removed. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. 
The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.removed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.removed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to remove. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='remove', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while removing ' 'package(s): {0}'.format(exc)) return ret def purged(name, version=None, pkgs=None, normalize=True, ignore_epoch=False, **kwargs): ''' Verify that a package is not installed, calling ``pkg.purge`` if necessary to purge the package. 
All configuration files are also removed. name The name of the package to be purged. version The version of the package that should be removed. Don't do anything if the package is installed with an unmatching version. .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the example above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.removed>`, and :py:mod:`pkg.purged <salt.states.pkg.purged>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. If **ignore_epoch** was not set to ``True``, and instead of ``2:7.4.160-1.el7`` a version of ``7.4.160-1.el7`` were used, this state would report success since the actual installed version includes the epoch, and the specified version would not match. normalize : True Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2015.8.0 ignore_epoch : False When a package version contains an non-zero epoch (e.g. ``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. 
code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.purged: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the state would falsely report success since the actual installed version is ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.purged: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 Multiple Package Options: pkgs A list of packages to purge. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. It accepts version numbers as well. .. versionadded:: 0.16.0 ''' kwargs['saltenv'] = __env__ try: return _uninstall(action='purge', name=name, version=version, pkgs=pkgs, normalize=normalize, ignore_epoch=ignore_epoch, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while purging ' 'package(s): {0}'.format(exc)) return ret def uptodate(name, refresh=False, pkgs=None, **kwargs): ''' .. versionadded:: 2014.7.0 .. versionchanged:: 2018.3.0 Added support for the ``pkgin`` provider. Verify that the system is completely up to date. name The name has no functional value and is only used as a tracking reference refresh refresh the package database before checking for new upgrades pkgs list of packages to upgrade :param str cache_valid_time: This parameter sets the value in seconds after which cache marked as invalid, and cache update is necessary. This overwrite ``refresh`` parameter default behavior. In this case cache_valid_time is set, refresh will not take place for amount in seconds since last ``apt-get update`` executed on the system. .. 
note:: This parameter available only on Debian based distributions, and have no effect on the rest. :param bool resolve_capabilities: Turn on resolving capabilities. This allow one to name "provides" or alias names for packages. .. versionadded:: 2018.3.0 kwargs Any keyword arguments to pass through to ``pkg.upgrade``. .. versionadded:: 2015.5.0 ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'Failed to update'} if 'pkg.list_upgrades' not in __salt__: ret['comment'] = 'State pkg.uptodate is not available' return ret # emerge --update doesn't appear to support repo notation if 'fromrepo' in kwargs and __grains__['os_family'] == 'Gentoo': ret['comment'] = '\'fromrepo\' argument not supported on this platform' return ret if isinstance(refresh, bool): pkgs, refresh = _resolve_capabilities(pkgs, refresh=refresh, **kwargs) try: packages = __salt__['pkg.list_upgrades'](refresh=refresh, **kwargs) expected = {pkgname: {'new': pkgver, 'old': __salt__['pkg.version'](pkgname, **kwargs)} for pkgname, pkgver in six.iteritems(packages)} if isinstance(pkgs, list): packages = [pkg for pkg in packages if pkg in pkgs] expected = {pkgname: pkgver for pkgname, pkgver in six.iteritems(expected) if pkgname in pkgs} except Exception as exc: ret['comment'] = six.text_type(exc) return ret else: ret['comment'] = 'refresh must be either True or False' return ret if not packages: ret['comment'] = 'System is already up-to-date' ret['result'] = True return ret elif __opts__['test']: ret['comment'] = 'System update will be performed' ret['changes'] = expected ret['result'] = None return ret try: ret['changes'] = __salt__['pkg.upgrade'](refresh=refresh, pkgs=pkgs, **kwargs) except CommandExecutionError as exc: if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while updating ' 'packages: {0}'.format(exc)) return ret # If a package list was provided, ensure those packages were updated missing = [] if isinstance(pkgs, list): missing = [pkg for pkg in six.iterkeys(expected) if pkg not in ret['changes']] if missing: ret['comment'] = 'The following package(s) failed to update: {0}'.format(', '.join(missing)) ret['result'] = False else: ret['comment'] = 'Upgrade ran successfully' ret['result'] = True return ret def group_installed(name, skip=None, include=None, **kwargs): ''' .. versionadded:: 2015.8.0 .. versionchanged:: 2016.11.0 Added support in :mod:`pacman <salt.modules.pacman>` Ensure that an entire package group is installed. This state is currently only supported for the :mod:`yum <salt.modules.yumpkg>` and :mod:`pacman <salt.modules.pacman>` package managers. skip Packages that would normally be installed by the package group ("default" packages), which should not be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - skip: - piranha include Packages which are included in a group, which would not normally be installed by a ``yum groupinstall`` ("optional" packages). Note that this will not enforce group membership; if you include packages which are not members of the specified groups, they will still be installed. .. code-block:: yaml Load Balancer: pkg.group_installed: - include: - haproxy .. versionchanged:: 2016.3.0 This option can no longer be passed as a comma-separated list, it must now be passed as a list (as shown in the above example). .. note:: Because this is essentially a wrapper around :py:func:`pkg.install <salt.modules.yumpkg.install>`, any argument which can be passed to pkg.install may also be included here, and it will be passed on to the call to :py:func:`pkg.install <salt.modules.yumpkg.install>`. 
''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} if 'pkg.group_diff' not in __salt__: ret['comment'] = 'pkg.group_install not available for this platform' return ret if skip is None: skip = [] else: if not isinstance(skip, list): ret['comment'] = 'skip must be formatted as a list' return ret for idx, item in enumerate(skip): if not isinstance(item, six.string_types): skip[idx] = six.text_type(item) if include is None: include = [] else: if not isinstance(include, list): ret['comment'] = 'include must be formatted as a list' return ret for idx, item in enumerate(include): if not isinstance(item, six.string_types): include[idx] = six.text_type(item) try: diff = __salt__['pkg.group_diff'](name) except CommandExecutionError as err: ret['comment'] = ('An error was encountered while installing/updating ' 'group \'{0}\': {1}.'.format(name, err)) return ret mandatory = diff['mandatory']['installed'] + \ diff['mandatory']['not installed'] invalid_skip = [x for x in mandatory if x in skip] if invalid_skip: ret['comment'] = ( 'The following mandatory packages cannot be skipped: {0}' .format(', '.join(invalid_skip)) ) return ret targets = diff['mandatory']['not installed'] targets.extend([x for x in diff['default']['not installed'] if x not in skip]) targets.extend(include) if not targets: ret['result'] = True ret['comment'] = 'Group \'{0}\' is already installed'.format(name) return ret partially_installed = diff['mandatory']['installed'] \ or diff['default']['installed'] \ or diff['optional']['installed'] if __opts__['test']: ret['result'] = None if partially_installed: ret['comment'] = ( 'Group \'{0}\' is partially installed and will be updated' .format(name) ) else: ret['comment'] = 'Group \'{0}\' will be installed'.format(name) return ret try: ret['changes'] = __salt__['pkg.install'](pkgs=targets, **kwargs) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. 
ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while ' 'installing/updating group \'{0}\': {1}' .format(name, exc)) return ret failed = [x for x in targets if x not in __salt__['pkg.list_pkgs'](**kwargs)] if failed: ret['comment'] = ( 'Failed to install the following packages: {0}' .format(', '.join(failed)) ) return ret ret['result'] = True ret['comment'] = 'Group \'{0}\' was {1}'.format( name, 'updated' if partially_installed else 'installed' ) return ret def mod_init(low): ''' Set a flag to tell the install functions to refresh the package database. This ensures that the package database is refreshed only once during a state run significantly improving the speed of package management during a state run. It sets a flag for a number of reasons, primarily due to timeline logic. When originally setting up the mod_init for pkg a number of corner cases arose with different package managers and how they refresh package data. It also runs the "ex_mod_init" from the package manager module that is currently loaded. The "ex_mod_init" is expected to work as a normal "mod_init" function. .. 
seealso:: :py:func:`salt.modules.ebuild.ex_mod_init` ''' ret = True if 'pkg.ex_mod_init' in __salt__: ret = __salt__['pkg.ex_mod_init'](low) if low['fun'] == 'installed' or low['fun'] == 'latest': salt.utils.pkg.write_rtag(__opts__) return ret return False def mod_aggregate(low, chunks, running): ''' The mod_aggregate function which looks up all packages in the available low chunks and merges them into a single pkgs ref in the present low data ''' pkgs = [] pkg_type = None agg_enabled = [ 'installed', 'latest', 'removed', 'purged', ] if low.get('fun') not in agg_enabled: return low for chunk in chunks: tag = __utils__['state.gen_tag'](chunk) if tag in running: # Already ran the pkg state, skip aggregation continue if chunk.get('state') == 'pkg': if '__agg__' in chunk: continue # Check for the same function if chunk.get('fun') != low.get('fun'): continue # Check for the same repo if chunk.get('fromrepo') != low.get('fromrepo'): continue # Check first if 'sources' was passed so we don't aggregate pkgs # and sources together. if 'sources' in chunk: if pkg_type is None: pkg_type = 'sources' if pkg_type == 'sources': pkgs.extend(chunk['sources']) chunk['__agg__'] = True else: if pkg_type is None: pkg_type = 'pkgs' if pkg_type == 'pkgs': # Pull out the pkg names! if 'pkgs' in chunk: pkgs.extend(chunk['pkgs']) chunk['__agg__'] = True elif 'name' in chunk: version = chunk.pop('version', None) if version is not None: pkgs.append({chunk['name']: version}) else: pkgs.append(chunk['name']) chunk['__agg__'] = True if pkg_type is not None and pkgs: if pkg_type in low: low[pkg_type].extend(pkgs) else: low[pkg_type] = pkgs return low
saltstack/salt
salt/states/openvswitch_port.py
present
python
def present(name, bridge, tunnel_type=None, id=None, remote=None, dst_port=None, internal=False): ''' Ensures that the named port exists on bridge, eventually creates it. Args: name: The name of the port. bridge: The name of the bridge. tunnel_type: Optional type of interface to create, currently supports: vlan, vxlan and gre. id: Optional tunnel's key. remote: Remote endpoint's IP address. dst_port: Port to use when creating tunnelport in the switch. internal: Create an internal port if one does not exist ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} tunnel_types = ('vlan', 'vxlan', 'gre') if tunnel_type and tunnel_type not in tunnel_types: raise TypeError('The optional type argument must be one of these values: {0}.'.format( six.text_type(tunnel_types)) ) bridge_exists = __salt__['openvswitch.bridge_exists'](bridge) port_list = [] if bridge_exists: port_list = __salt__['openvswitch.port_list'](bridge) # Comment and change messages comments = {} comments['comment_bridge_notexists'] = 'Bridge {0} does not exist.'.format(bridge) comments['comment_port_exists'] = 'Port {0} already exists.'.format(name) comments['comment_port_created'] = 'Port {0} created on bridge {1}.'.format(name, bridge) comments['comment_port_notcreated'] = 'Unable to create port {0} on bridge {1}.'.format(name, bridge) comments['changes_port_created'] = {name: {'old': 'No port named {0} present.'.format(name), 'new': 'Created port {1} on bridge {0}.'.format(bridge, name), } } comments['comment_port_internal'] = 'Port {0} already exists, but interface type has been changed to internal.'.format(name) comments['changes_port_internal'] = {'internal': {'old': False, 'new': True}} comments['comment_port_internal_not_changed'] = 'Port {0} already exists, but the interface type could not be changed to internal.'.format(name) if tunnel_type: comments['comment_invalid_ip'] = 'Remote is not valid ip address.' 
if tunnel_type == "vlan": comments['comment_vlan_invalid_id'] = 'VLANs id must be between 0 and 4095.' comments['comment_vlan_invalid_name'] = 'Could not find network interface {0}.'.format(name) comments['comment_vlan_port_exists'] = 'Port {0} with access to VLAN {1} already exists on bridge {2}.'.format(name, id, bridge) comments['comment_vlan_created'] = 'Created port {0} with access to VLAN {1} on bridge {2}.'.format(name, id, bridge) comments['comment_vlan_notcreated'] = 'Unable to create port {0} with access to VLAN {1} on ' \ 'bridge {2}.'.format(name, id, bridge) comments['changes_vlan_created'] = {name: {'old': 'No port named {0} with access to VLAN {1} present on ' 'bridge {2} present.'.format(name, id, bridge), 'new': 'Created port {1} with access to VLAN {2} on ' 'bridge {0}.'.format(bridge, name, id), } } elif tunnel_type == "gre": comments['comment_gre_invalid_id'] = 'Id of GRE tunnel must be an unsigned 32-bit integer.' comments['comment_gre_interface_exists'] = 'GRE tunnel interface {0} with rempte ip {1} and key {2} ' \ 'already exists on bridge {3}.'.format(name, remote, id, bridge) comments['comment_gre_created'] = 'Created GRE tunnel interface {0} with remote ip {1} and key {2} ' \ 'on bridge {3}.'.format(name, remote, id, bridge) comments['comment_gre_notcreated'] = 'Unable to create GRE tunnel interface {0} with remote ip {1} and key {2} ' \ 'on bridge {3}.'.format(name, remote, id, bridge) comments['changes_gre_created'] = {name: {'old': 'No GRE tunnel interface {0} with remote ip {1} and key {2} ' 'on bridge {3} present.'.format(name, remote, id, bridge), 'new': 'Created GRE tunnel interface {0} with remote ip {1} and key {2} ' 'on bridge {3}.'.format(name, remote, id, bridge), } } elif tunnel_type == "vxlan": comments['comment_dstport'] = ' (dst_port' + six.text_type(dst_port) + ')' if 0 < dst_port <= 65535 else '' comments['comment_vxlan_invalid_id'] = 'Id of VXLAN tunnel must be an unsigned 64-bit integer.' 
comments['comment_vxlan_interface_exists'] = 'VXLAN tunnel interface {0} with rempte ip {1} and key {2} ' \ 'already exists on bridge {3}{4}.'.format(name, remote, id, bridge, comments['comment_dstport']) comments['comment_vxlan_created'] = 'Created VXLAN tunnel interface {0} with remote ip {1} and key {2} ' \ 'on bridge {3}{4}.'.format(name, remote, id, bridge, comments['comment_dstport']) comments['comment_vxlan_notcreated'] = 'Unable to create VXLAN tunnel interface {0} with remote ip {1} and key {2} ' \ 'on bridge {3}{4}.'.format(name, remote, id, bridge, comments['comment_dstport']) comments['changes_vxlan_created'] = {name: {'old': 'No VXLAN tunnel interface {0} with remote ip {1} and key {2} ' 'on bridge {3}{4} present.'.format(name, remote, id, bridge, comments['comment_dstport']), 'new': 'Created VXLAN tunnel interface {0} with remote ip {1} and key {2} ' 'on bridge {3}{4}.'.format(name, remote, id, bridge, comments['comment_dstport']), } } # Check VLANs attributes def _check_vlan(): tag = __salt__['openvswitch.port_get_tag'](name) interfaces = __salt__['network.interfaces']() if not 0 <= id <= 4095: ret['result'] = False ret['comment'] = comments['comment_vlan_invalid_id'] elif not internal and name not in interfaces: ret['result'] = False ret['comment'] = comments['comment_vlan_invalid_name'] elif tag and name in port_list: try: if int(tag[0]) == id: ret['result'] = True ret['comment'] = comments['comment_vlan_port_exists'] except (ValueError, KeyError): pass # Check GRE tunnels attributes def _check_gre(): interface_options = __salt__['openvswitch.interface_get_options'](name) interface_type = __salt__['openvswitch.interface_get_type'](name) if not 0 <= id <= 2**32: ret['result'] = False ret['comment'] = comments['comment_gre_invalid_id'] elif not __salt__['dig.check_ip'](remote): ret['result'] = False ret['comment'] = comments['comment_invalid_ip'] elif interface_options and interface_type and name in port_list: interface_attroptions = '{key=\"' + 
six.text_type(id) + '\", remote_ip=\"' + six.text_type(remote) + '\"}' try: if interface_type[0] == 'gre' and interface_options[0] == interface_attroptions: ret['result'] = True ret['comment'] = comments['comment_gre_interface_exists'] except KeyError: pass # Check VXLAN tunnels attributes def _check_vxlan(): interface_options = __salt__['openvswitch.interface_get_options'](name) interface_type = __salt__['openvswitch.interface_get_type'](name) if not 0 <= id <= 2**64: ret['result'] = False ret['comment'] = comments['comment_vxlan_invalid_id'] elif not __salt__['dig.check_ip'](remote): ret['result'] = False ret['comment'] = comments['comment_invalid_ip'] elif interface_options and interface_type and name in port_list: opt_port = 'dst_port=\"' + six.text_type(dst_port) + '\", ' if 0 < dst_port <= 65535 else '' interface_attroptions = '{{{0}key=\"'.format(opt_port) + six.text_type(id) + '\", remote_ip=\"' + six.text_type(remote) + '\"}' try: if interface_type[0] == 'vxlan' and interface_options[0] == interface_attroptions: ret['result'] = True ret['comment'] = comments['comment_vxlan_interface_exists'] except KeyError: pass # Dry run, test=true mode if __opts__['test']: if bridge_exists: if tunnel_type == 'vlan': _check_vlan() if not ret['comment']: ret['result'] = None ret['comment'] = comments['comment_vlan_created'] elif tunnel_type == 'vxlan': _check_vxlan() if not ret['comment']: ret['result'] = None ret['comment'] = comments['comment_vxlan_created'] elif tunnel_type == 'gre': _check_gre() if not ret['comment']: ret['result'] = None ret['comment'] = comments['comment_gre_created'] else: if name in port_list: ret['result'] = True current_type = __salt__['openvswitch.interface_get_type']( name) # The interface type is returned as a single-element list. 
if internal and (current_type != ['internal']): ret['comment'] = comments['comment_port_internal'] else: ret['comment'] = comments['comment_port_exists'] else: ret['result'] = None ret['comment'] = comments['comment_port_created'] else: ret['result'] = None ret['comment'] = comments['comment_bridge_notexists'] return ret if bridge_exists: if tunnel_type == 'vlan': _check_vlan() if not ret['comment']: port_create_vlan = __salt__['openvswitch.port_create_vlan'](bridge, name, id, internal) if port_create_vlan: ret['result'] = True ret['comment'] = comments['comment_vlan_created'] ret['changes'] = comments['changes_vlan_created'] else: ret['result'] = False ret['comment'] = comments['comment_vlan_notcreated'] elif tunnel_type == 'vxlan': _check_vxlan() if not ret['comment']: port_create_vxlan = __salt__['openvswitch.port_create_vxlan'](bridge, name, id, remote, dst_port) if port_create_vxlan: ret['result'] = True ret['comment'] = comments['comment_vxlan_created'] ret['changes'] = comments['changes_vxlan_created'] else: ret['result'] = False ret['comment'] = comments['comment_vxlan_notcreated'] elif tunnel_type == 'gre': _check_gre() if not ret['comment']: port_create_gre = __salt__['openvswitch.port_create_gre'](bridge, name, id, remote) if port_create_gre: ret['result'] = True ret['comment'] = comments['comment_gre_created'] ret['changes'] = comments['changes_gre_created'] else: ret['result'] = False ret['comment'] = comments['comment_gre_notcreated'] else: if name in port_list: current_type = __salt__['openvswitch.interface_get_type'](name) # The interface type is returned as a single-element list. if internal and (current_type != ['internal']): # We do not have a direct way of only setting the interface # type to internal, so we add the port with the --may-exist # option. 
port_add = __salt__['openvswitch.port_add']( bridge, name, may_exist=True, internal=internal) if port_add: ret['result'] = True ret['comment'] = comments['comment_port_internal'] ret['changes'] = comments['changes_port_internal'] else: ret['result'] = False ret['comment'] = comments[ 'comment_port_internal_not_changed'] else: ret['result'] = True ret['comment'] = comments['comment_port_exists'] else: port_add = __salt__['openvswitch.port_add'](bridge, name, internal=internal) if port_add: ret['result'] = True ret['comment'] = comments['comment_port_created'] ret['changes'] = comments['changes_port_created'] else: ret['result'] = False ret['comment'] = comments['comment_port_notcreated'] else: ret['result'] = False ret['comment'] = comments['comment_bridge_notexists'] return ret
Ensures that the named port exists on bridge, eventually creates it. Args: name: The name of the port. bridge: The name of the bridge. tunnel_type: Optional type of interface to create, currently supports: vlan, vxlan and gre. id: Optional tunnel's key. remote: Remote endpoint's IP address. dst_port: Port to use when creating tunnelport in the switch. internal: Create an internal port if one does not exist
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/openvswitch_port.py#L20-L271
[ "def _check_vlan():\n tag = __salt__['openvswitch.port_get_tag'](name)\n interfaces = __salt__['network.interfaces']()\n if not 0 <= id <= 4095:\n ret['result'] = False\n ret['comment'] = comments['comment_vlan_invalid_id']\n elif not internal and name not in interfaces:\n ret['result'] = False\n ret['comment'] = comments['comment_vlan_invalid_name']\n elif tag and name in port_list:\n try:\n if int(tag[0]) == id:\n ret['result'] = True\n ret['comment'] = comments['comment_vlan_port_exists']\n except (ValueError, KeyError):\n pass\n", "def _check_gre():\n interface_options = __salt__['openvswitch.interface_get_options'](name)\n interface_type = __salt__['openvswitch.interface_get_type'](name)\n if not 0 <= id <= 2**32:\n ret['result'] = False\n ret['comment'] = comments['comment_gre_invalid_id']\n elif not __salt__['dig.check_ip'](remote):\n ret['result'] = False\n ret['comment'] = comments['comment_invalid_ip']\n elif interface_options and interface_type and name in port_list:\n interface_attroptions = '{key=\\\"' + six.text_type(id) + '\\\", remote_ip=\\\"' + six.text_type(remote) + '\\\"}'\n try:\n if interface_type[0] == 'gre' and interface_options[0] == interface_attroptions:\n ret['result'] = True\n ret['comment'] = comments['comment_gre_interface_exists']\n except KeyError:\n pass\n", "def _check_vxlan():\n interface_options = __salt__['openvswitch.interface_get_options'](name)\n interface_type = __salt__['openvswitch.interface_get_type'](name)\n if not 0 <= id <= 2**64:\n ret['result'] = False\n ret['comment'] = comments['comment_vxlan_invalid_id']\n elif not __salt__['dig.check_ip'](remote):\n ret['result'] = False\n ret['comment'] = comments['comment_invalid_ip']\n elif interface_options and interface_type and name in port_list:\n opt_port = 'dst_port=\\\"' + six.text_type(dst_port) + '\\\", ' if 0 < dst_port <= 65535 else ''\n interface_attroptions = '{{{0}key=\\\"'.format(opt_port) + six.text_type(id) + '\\\", remote_ip=\\\"' + six.text_type(remote) + 
'\\\"}'\n try:\n if interface_type[0] == 'vxlan' and interface_options[0] == interface_attroptions:\n ret['result'] = True\n ret['comment'] = comments['comment_vxlan_interface_exists']\n except KeyError:\n pass\n" ]
# -*- coding: utf-8 -*- ''' Management of Open vSwitch ports. ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals # Import Salt libs from salt.ext import six def __virtual__(): ''' Only make these states available if Open vSwitch module is available. ''' return 'openvswitch.port_add' in __salt__ def absent(name, bridge=None): ''' Ensures that the named port exists on bridge, eventually deletes it. If bridge is not set, port is removed from whatever bridge contains it. Args: name: The name of the port. bridge: The name of the bridge. ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} bridge_exists = False if bridge: bridge_exists = __salt__['openvswitch.bridge_exists'](bridge) if bridge_exists: port_list = __salt__['openvswitch.port_list'](bridge) else: port_list = () else: port_list = [name] # Comment and change messages comments = {} comments['comment_bridge_notexists'] = 'Bridge {0} does not exist.'.format(bridge) comments['comment_port_notexists'] = 'Port {0} does not exist on bridge {1}.'.format(name, bridge) comments['comment_port_deleted'] = 'Port {0} deleted.'.format(name) comments['comment_port_notdeleted'] = 'Unable to delete port {0}.'.format(name) comments['changes_port_deleted'] = {name: {'old': 'Port named {0} may exist.'.format(name), 'new': 'Deleted port {0}.'.format(name), } } # Dry run, test=true mode if __opts__['test']: if bridge and not bridge_exists: ret['result'] = None ret['comment'] = comments['comment_bridge_notexists'] elif name not in port_list: ret['result'] = True ret['comment'] = comments['comment_port_notexists'] else: ret['result'] = None ret['comment'] = comments['comment_port_deleted'] return ret if bridge and not bridge_exists: ret['result'] = False ret['comment'] = comments['comment_bridge_notexists'] elif name not in port_list: ret['result'] = True ret['comment'] = comments['comment_port_notexists'] else: if bridge: port_remove = 
__salt__['openvswitch.port_remove'](br=bridge, port=name) else: port_remove = __salt__['openvswitch.port_remove'](br=None, port=name) if port_remove: ret['result'] = True ret['comment'] = comments['comment_port_deleted'] ret['changes'] = comments['changes_port_deleted'] else: ret['result'] = False ret['comment'] = comments['comment_port_notdeleted'] return ret
saltstack/salt
salt/states/openvswitch_port.py
absent
python
def absent(name, bridge=None): ''' Ensures that the named port exists on bridge, eventually deletes it. If bridge is not set, port is removed from whatever bridge contains it. Args: name: The name of the port. bridge: The name of the bridge. ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} bridge_exists = False if bridge: bridge_exists = __salt__['openvswitch.bridge_exists'](bridge) if bridge_exists: port_list = __salt__['openvswitch.port_list'](bridge) else: port_list = () else: port_list = [name] # Comment and change messages comments = {} comments['comment_bridge_notexists'] = 'Bridge {0} does not exist.'.format(bridge) comments['comment_port_notexists'] = 'Port {0} does not exist on bridge {1}.'.format(name, bridge) comments['comment_port_deleted'] = 'Port {0} deleted.'.format(name) comments['comment_port_notdeleted'] = 'Unable to delete port {0}.'.format(name) comments['changes_port_deleted'] = {name: {'old': 'Port named {0} may exist.'.format(name), 'new': 'Deleted port {0}.'.format(name), } } # Dry run, test=true mode if __opts__['test']: if bridge and not bridge_exists: ret['result'] = None ret['comment'] = comments['comment_bridge_notexists'] elif name not in port_list: ret['result'] = True ret['comment'] = comments['comment_port_notexists'] else: ret['result'] = None ret['comment'] = comments['comment_port_deleted'] return ret if bridge and not bridge_exists: ret['result'] = False ret['comment'] = comments['comment_bridge_notexists'] elif name not in port_list: ret['result'] = True ret['comment'] = comments['comment_port_notexists'] else: if bridge: port_remove = __salt__['openvswitch.port_remove'](br=bridge, port=name) else: port_remove = __salt__['openvswitch.port_remove'](br=None, port=name) if port_remove: ret['result'] = True ret['comment'] = comments['comment_port_deleted'] ret['changes'] = comments['changes_port_deleted'] else: ret['result'] = False ret['comment'] = comments['comment_port_notdeleted'] return ret
Ensures that the named port exists on bridge, eventually deletes it. If bridge is not set, port is removed from whatever bridge contains it. Args: name: The name of the port. bridge: The name of the bridge.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/openvswitch_port.py#L274-L339
null
# -*- coding: utf-8 -*- ''' Management of Open vSwitch ports. ''' # Import Python libs from __future__ import absolute_import, print_function, unicode_literals # Import Salt libs from salt.ext import six def __virtual__(): ''' Only make these states available if Open vSwitch module is available. ''' return 'openvswitch.port_add' in __salt__ def present(name, bridge, tunnel_type=None, id=None, remote=None, dst_port=None, internal=False): ''' Ensures that the named port exists on bridge, eventually creates it. Args: name: The name of the port. bridge: The name of the bridge. tunnel_type: Optional type of interface to create, currently supports: vlan, vxlan and gre. id: Optional tunnel's key. remote: Remote endpoint's IP address. dst_port: Port to use when creating tunnelport in the switch. internal: Create an internal port if one does not exist ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} tunnel_types = ('vlan', 'vxlan', 'gre') if tunnel_type and tunnel_type not in tunnel_types: raise TypeError('The optional type argument must be one of these values: {0}.'.format( six.text_type(tunnel_types)) ) bridge_exists = __salt__['openvswitch.bridge_exists'](bridge) port_list = [] if bridge_exists: port_list = __salt__['openvswitch.port_list'](bridge) # Comment and change messages comments = {} comments['comment_bridge_notexists'] = 'Bridge {0} does not exist.'.format(bridge) comments['comment_port_exists'] = 'Port {0} already exists.'.format(name) comments['comment_port_created'] = 'Port {0} created on bridge {1}.'.format(name, bridge) comments['comment_port_notcreated'] = 'Unable to create port {0} on bridge {1}.'.format(name, bridge) comments['changes_port_created'] = {name: {'old': 'No port named {0} present.'.format(name), 'new': 'Created port {1} on bridge {0}.'.format(bridge, name), } } comments['comment_port_internal'] = 'Port {0} already exists, but interface type has been changed to internal.'.format(name) comments['changes_port_internal'] = 
{'internal': {'old': False, 'new': True}} comments['comment_port_internal_not_changed'] = 'Port {0} already exists, but the interface type could not be changed to internal.'.format(name) if tunnel_type: comments['comment_invalid_ip'] = 'Remote is not valid ip address.' if tunnel_type == "vlan": comments['comment_vlan_invalid_id'] = 'VLANs id must be between 0 and 4095.' comments['comment_vlan_invalid_name'] = 'Could not find network interface {0}.'.format(name) comments['comment_vlan_port_exists'] = 'Port {0} with access to VLAN {1} already exists on bridge {2}.'.format(name, id, bridge) comments['comment_vlan_created'] = 'Created port {0} with access to VLAN {1} on bridge {2}.'.format(name, id, bridge) comments['comment_vlan_notcreated'] = 'Unable to create port {0} with access to VLAN {1} on ' \ 'bridge {2}.'.format(name, id, bridge) comments['changes_vlan_created'] = {name: {'old': 'No port named {0} with access to VLAN {1} present on ' 'bridge {2} present.'.format(name, id, bridge), 'new': 'Created port {1} with access to VLAN {2} on ' 'bridge {0}.'.format(bridge, name, id), } } elif tunnel_type == "gre": comments['comment_gre_invalid_id'] = 'Id of GRE tunnel must be an unsigned 32-bit integer.' 
comments['comment_gre_interface_exists'] = 'GRE tunnel interface {0} with rempte ip {1} and key {2} ' \ 'already exists on bridge {3}.'.format(name, remote, id, bridge) comments['comment_gre_created'] = 'Created GRE tunnel interface {0} with remote ip {1} and key {2} ' \ 'on bridge {3}.'.format(name, remote, id, bridge) comments['comment_gre_notcreated'] = 'Unable to create GRE tunnel interface {0} with remote ip {1} and key {2} ' \ 'on bridge {3}.'.format(name, remote, id, bridge) comments['changes_gre_created'] = {name: {'old': 'No GRE tunnel interface {0} with remote ip {1} and key {2} ' 'on bridge {3} present.'.format(name, remote, id, bridge), 'new': 'Created GRE tunnel interface {0} with remote ip {1} and key {2} ' 'on bridge {3}.'.format(name, remote, id, bridge), } } elif tunnel_type == "vxlan": comments['comment_dstport'] = ' (dst_port' + six.text_type(dst_port) + ')' if 0 < dst_port <= 65535 else '' comments['comment_vxlan_invalid_id'] = 'Id of VXLAN tunnel must be an unsigned 64-bit integer.' 
comments['comment_vxlan_interface_exists'] = 'VXLAN tunnel interface {0} with rempte ip {1} and key {2} ' \ 'already exists on bridge {3}{4}.'.format(name, remote, id, bridge, comments['comment_dstport']) comments['comment_vxlan_created'] = 'Created VXLAN tunnel interface {0} with remote ip {1} and key {2} ' \ 'on bridge {3}{4}.'.format(name, remote, id, bridge, comments['comment_dstport']) comments['comment_vxlan_notcreated'] = 'Unable to create VXLAN tunnel interface {0} with remote ip {1} and key {2} ' \ 'on bridge {3}{4}.'.format(name, remote, id, bridge, comments['comment_dstport']) comments['changes_vxlan_created'] = {name: {'old': 'No VXLAN tunnel interface {0} with remote ip {1} and key {2} ' 'on bridge {3}{4} present.'.format(name, remote, id, bridge, comments['comment_dstport']), 'new': 'Created VXLAN tunnel interface {0} with remote ip {1} and key {2} ' 'on bridge {3}{4}.'.format(name, remote, id, bridge, comments['comment_dstport']), } } # Check VLANs attributes def _check_vlan(): tag = __salt__['openvswitch.port_get_tag'](name) interfaces = __salt__['network.interfaces']() if not 0 <= id <= 4095: ret['result'] = False ret['comment'] = comments['comment_vlan_invalid_id'] elif not internal and name not in interfaces: ret['result'] = False ret['comment'] = comments['comment_vlan_invalid_name'] elif tag and name in port_list: try: if int(tag[0]) == id: ret['result'] = True ret['comment'] = comments['comment_vlan_port_exists'] except (ValueError, KeyError): pass # Check GRE tunnels attributes def _check_gre(): interface_options = __salt__['openvswitch.interface_get_options'](name) interface_type = __salt__['openvswitch.interface_get_type'](name) if not 0 <= id <= 2**32: ret['result'] = False ret['comment'] = comments['comment_gre_invalid_id'] elif not __salt__['dig.check_ip'](remote): ret['result'] = False ret['comment'] = comments['comment_invalid_ip'] elif interface_options and interface_type and name in port_list: interface_attroptions = '{key=\"' + 
six.text_type(id) + '\", remote_ip=\"' + six.text_type(remote) + '\"}' try: if interface_type[0] == 'gre' and interface_options[0] == interface_attroptions: ret['result'] = True ret['comment'] = comments['comment_gre_interface_exists'] except KeyError: pass # Check VXLAN tunnels attributes def _check_vxlan(): interface_options = __salt__['openvswitch.interface_get_options'](name) interface_type = __salt__['openvswitch.interface_get_type'](name) if not 0 <= id <= 2**64: ret['result'] = False ret['comment'] = comments['comment_vxlan_invalid_id'] elif not __salt__['dig.check_ip'](remote): ret['result'] = False ret['comment'] = comments['comment_invalid_ip'] elif interface_options and interface_type and name in port_list: opt_port = 'dst_port=\"' + six.text_type(dst_port) + '\", ' if 0 < dst_port <= 65535 else '' interface_attroptions = '{{{0}key=\"'.format(opt_port) + six.text_type(id) + '\", remote_ip=\"' + six.text_type(remote) + '\"}' try: if interface_type[0] == 'vxlan' and interface_options[0] == interface_attroptions: ret['result'] = True ret['comment'] = comments['comment_vxlan_interface_exists'] except KeyError: pass # Dry run, test=true mode if __opts__['test']: if bridge_exists: if tunnel_type == 'vlan': _check_vlan() if not ret['comment']: ret['result'] = None ret['comment'] = comments['comment_vlan_created'] elif tunnel_type == 'vxlan': _check_vxlan() if not ret['comment']: ret['result'] = None ret['comment'] = comments['comment_vxlan_created'] elif tunnel_type == 'gre': _check_gre() if not ret['comment']: ret['result'] = None ret['comment'] = comments['comment_gre_created'] else: if name in port_list: ret['result'] = True current_type = __salt__['openvswitch.interface_get_type']( name) # The interface type is returned as a single-element list. 
if internal and (current_type != ['internal']): ret['comment'] = comments['comment_port_internal'] else: ret['comment'] = comments['comment_port_exists'] else: ret['result'] = None ret['comment'] = comments['comment_port_created'] else: ret['result'] = None ret['comment'] = comments['comment_bridge_notexists'] return ret if bridge_exists: if tunnel_type == 'vlan': _check_vlan() if not ret['comment']: port_create_vlan = __salt__['openvswitch.port_create_vlan'](bridge, name, id, internal) if port_create_vlan: ret['result'] = True ret['comment'] = comments['comment_vlan_created'] ret['changes'] = comments['changes_vlan_created'] else: ret['result'] = False ret['comment'] = comments['comment_vlan_notcreated'] elif tunnel_type == 'vxlan': _check_vxlan() if not ret['comment']: port_create_vxlan = __salt__['openvswitch.port_create_vxlan'](bridge, name, id, remote, dst_port) if port_create_vxlan: ret['result'] = True ret['comment'] = comments['comment_vxlan_created'] ret['changes'] = comments['changes_vxlan_created'] else: ret['result'] = False ret['comment'] = comments['comment_vxlan_notcreated'] elif tunnel_type == 'gre': _check_gre() if not ret['comment']: port_create_gre = __salt__['openvswitch.port_create_gre'](bridge, name, id, remote) if port_create_gre: ret['result'] = True ret['comment'] = comments['comment_gre_created'] ret['changes'] = comments['changes_gre_created'] else: ret['result'] = False ret['comment'] = comments['comment_gre_notcreated'] else: if name in port_list: current_type = __salt__['openvswitch.interface_get_type'](name) # The interface type is returned as a single-element list. if internal and (current_type != ['internal']): # We do not have a direct way of only setting the interface # type to internal, so we add the port with the --may-exist # option. 
port_add = __salt__['openvswitch.port_add']( bridge, name, may_exist=True, internal=internal) if port_add: ret['result'] = True ret['comment'] = comments['comment_port_internal'] ret['changes'] = comments['changes_port_internal'] else: ret['result'] = False ret['comment'] = comments[ 'comment_port_internal_not_changed'] else: ret['result'] = True ret['comment'] = comments['comment_port_exists'] else: port_add = __salt__['openvswitch.port_add'](bridge, name, internal=internal) if port_add: ret['result'] = True ret['comment'] = comments['comment_port_created'] ret['changes'] = comments['changes_port_created'] else: ret['result'] = False ret['comment'] = comments['comment_port_notcreated'] else: ret['result'] = False ret['comment'] = comments['comment_bridge_notexists'] return ret
saltstack/salt
salt/modules/system_profiler.py
_call_system_profiler
python
def _call_system_profiler(datatype): ''' Call out to system_profiler. Return a dictionary of the stuff we are interested in. ''' p = subprocess.Popen( [PROFILER_BINARY, '-detailLevel', 'full', '-xml', datatype], stdout=subprocess.PIPE) (sysprofresults, sysprof_stderr) = p.communicate(input=None) if six.PY2: plist = plistlib.readPlistFromString(sysprofresults) else: plist = plistlib.readPlistFromBytes(sysprofresults) try: apps = plist[0]['_items'] except (IndexError, KeyError): apps = [] return apps
Call out to system_profiler. Return a dictionary of the stuff we are interested in.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/system_profiler.py#L34-L55
null
# -*- coding: utf-8 -*- ''' System Profiler Module Interface with macOS's command-line System Profiler utility to get information about package receipts and installed applications. .. versionadded:: 2015.5.0 ''' from __future__ import absolute_import, unicode_literals, print_function import plistlib import subprocess import salt.utils.path from salt.ext import six PROFILER_BINARY = '/usr/sbin/system_profiler' def __virtual__(): ''' Check to see if the system_profiler binary is available ''' PROFILER_BINARY = salt.utils.path.which('system_profiler') if PROFILER_BINARY: return True return (False, 'The system_profiler execution module cannot be loaded: ' 'system_profiler unavailable.') def receipts(): ''' Return the results of a call to ``system_profiler -xml -detail full SPInstallHistoryDataType`` as a dictionary. Top-level keys of the dictionary are the names of each set of install receipts, since there can be multiple receipts with the same name. Contents of each key are a list of dictionaries. CLI Example: .. code-block:: bash salt '*' systemprofiler.receipts ''' apps = _call_system_profiler('SPInstallHistoryDataType') appdict = {} for a in apps: details = dict(a) details.pop('_name') if 'install_date' in details: details['install_date'] = details['install_date'].strftime('%Y-%m-%d %H:%M:%S') if 'info' in details: try: details['info'] = '{0}: {1}'.format(details['info'][0], details['info'][1].strftime('%Y-%m-%d %H:%M:%S')) except (IndexError, AttributeError): pass if a['_name'] not in appdict: appdict[a['_name']] = [] appdict[a['_name']].append(details) return appdict def applications(): ''' Return the results of a call to ``system_profiler -xml -detail full SPApplicationsDataType`` as a dictionary. Top-level keys of the dictionary are the names of each set of install receipts, since there can be multiple receipts with the same name. Contents of each key are a list of dictionaries. 
Note that this can take a long time depending on how many applications are installed on the target Mac. CLI Example: .. code-block:: bash salt '*' systemprofiler.applications ''' apps = _call_system_profiler('SPApplicationsDataType') appdict = {} for a in apps: details = dict(a) details.pop('_name') if 'lastModified' in details: details['lastModified'] = details['lastModified'].strftime('%Y-%m-%d %H:%M:%S') if 'info' in details: try: details['info'] = '{0}: {1}'.format(details['info'][0], details['info'][1].strftime('%Y-%m-%d %H:%M:%S')) except (IndexError, AttributeError): pass if a['_name'] not in appdict: appdict[a['_name']] = [] appdict[a['_name']].append(details) return appdict
saltstack/salt
salt/modules/system_profiler.py
receipts
python
def receipts(): ''' Return the results of a call to ``system_profiler -xml -detail full SPInstallHistoryDataType`` as a dictionary. Top-level keys of the dictionary are the names of each set of install receipts, since there can be multiple receipts with the same name. Contents of each key are a list of dictionaries. CLI Example: .. code-block:: bash salt '*' systemprofiler.receipts ''' apps = _call_system_profiler('SPInstallHistoryDataType') appdict = {} for a in apps: details = dict(a) details.pop('_name') if 'install_date' in details: details['install_date'] = details['install_date'].strftime('%Y-%m-%d %H:%M:%S') if 'info' in details: try: details['info'] = '{0}: {1}'.format(details['info'][0], details['info'][1].strftime('%Y-%m-%d %H:%M:%S')) except (IndexError, AttributeError): pass if a['_name'] not in appdict: appdict[a['_name']] = [] appdict[a['_name']].append(details) return appdict
Return the results of a call to ``system_profiler -xml -detail full SPInstallHistoryDataType`` as a dictionary. Top-level keys of the dictionary are the names of each set of install receipts, since there can be multiple receipts with the same name. Contents of each key are a list of dictionaries. CLI Example: .. code-block:: bash salt '*' systemprofiler.receipts
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/system_profiler.py#L58-L95
[ "def _call_system_profiler(datatype):\n '''\n Call out to system_profiler. Return a dictionary\n of the stuff we are interested in.\n '''\n\n p = subprocess.Popen(\n [PROFILER_BINARY, '-detailLevel', 'full',\n '-xml', datatype], stdout=subprocess.PIPE)\n (sysprofresults, sysprof_stderr) = p.communicate(input=None)\n\n if six.PY2:\n plist = plistlib.readPlistFromString(sysprofresults)\n else:\n plist = plistlib.readPlistFromBytes(sysprofresults)\n\n try:\n apps = plist[0]['_items']\n except (IndexError, KeyError):\n apps = []\n\n return apps\n" ]
# -*- coding: utf-8 -*- ''' System Profiler Module Interface with macOS's command-line System Profiler utility to get information about package receipts and installed applications. .. versionadded:: 2015.5.0 ''' from __future__ import absolute_import, unicode_literals, print_function import plistlib import subprocess import salt.utils.path from salt.ext import six PROFILER_BINARY = '/usr/sbin/system_profiler' def __virtual__(): ''' Check to see if the system_profiler binary is available ''' PROFILER_BINARY = salt.utils.path.which('system_profiler') if PROFILER_BINARY: return True return (False, 'The system_profiler execution module cannot be loaded: ' 'system_profiler unavailable.') def _call_system_profiler(datatype): ''' Call out to system_profiler. Return a dictionary of the stuff we are interested in. ''' p = subprocess.Popen( [PROFILER_BINARY, '-detailLevel', 'full', '-xml', datatype], stdout=subprocess.PIPE) (sysprofresults, sysprof_stderr) = p.communicate(input=None) if six.PY2: plist = plistlib.readPlistFromString(sysprofresults) else: plist = plistlib.readPlistFromBytes(sysprofresults) try: apps = plist[0]['_items'] except (IndexError, KeyError): apps = [] return apps def applications(): ''' Return the results of a call to ``system_profiler -xml -detail full SPApplicationsDataType`` as a dictionary. Top-level keys of the dictionary are the names of each set of install receipts, since there can be multiple receipts with the same name. Contents of each key are a list of dictionaries. Note that this can take a long time depending on how many applications are installed on the target Mac. CLI Example: .. 
code-block:: bash salt '*' systemprofiler.applications ''' apps = _call_system_profiler('SPApplicationsDataType') appdict = {} for a in apps: details = dict(a) details.pop('_name') if 'lastModified' in details: details['lastModified'] = details['lastModified'].strftime('%Y-%m-%d %H:%M:%S') if 'info' in details: try: details['info'] = '{0}: {1}'.format(details['info'][0], details['info'][1].strftime('%Y-%m-%d %H:%M:%S')) except (IndexError, AttributeError): pass if a['_name'] not in appdict: appdict[a['_name']] = [] appdict[a['_name']].append(details) return appdict
saltstack/salt
salt/states/solrcloud.py
alias
python
def alias(name, collections, **kwargs): ''' Create alias and enforce collection list. Use the solrcloud module to get alias members and set them. You can pass additional arguments that will be forwarded to http.query name The collection name collections list of collections to include in the alias ''' ret = { 'name': name, 'changes': {}, 'result': False, 'comment': '', } if __salt__['solrcloud.alias_exists'](name, **kwargs): alias_content = __salt__['solrcloud.alias_get_collections'](name, **kwargs) diff = set(alias_content).difference(set(collections)) if not diff: ret['result'] = True ret['comment'] = 'Alias is in desired state' return ret if __opts__['test']: ret['comment'] = 'The alias "{0}" will be updated.'.format(name) ret['result'] = None else: __salt__['solrcloud.alias_set_collections'](name, collections, **kwargs) ret['comment'] = 'The alias "{0}" has been updated.'.format(name) ret['result'] = True ret['changes'] = { 'old': ','.join(alias_content), 'new': ','.join(collections), } else: if __opts__['test']: ret['comment'] = 'The alias "{0}" will be created.'.format(name) ret['result'] = None else: __salt__['solrcloud.alias_set_collections'](name, collections, **kwargs) ret['comment'] = 'The alias "{0}" has been created.'.format(name) ret['result'] = True ret['changes'] = { 'old': None, 'new': ','.join(collections), } return ret
Create alias and enforce collection list. Use the solrcloud module to get alias members and set them. You can pass additional arguments that will be forwarded to http.query name The collection name collections list of collections to include in the alias
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/solrcloud.py#L19-L75
null
# -*- coding: utf-8 -*- ''' States for solrcloud alias and collection configuration .. versionadded:: 2017.7.0 ''' # Import Python libs from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.json # Import 3rd party libs from salt.ext import six def collection(name, options=None, **kwargs): ''' Create collection and enforce options. Use the solrcloud module to get collection parameters. You can pass additional arguments that will be forwarded to http.query name The collection name options : {} options to ensure ''' ret = { 'name': name, 'changes': {}, 'result': False, 'comment': '', } if options is None: options = {} if __salt__["solrcloud.collection_exists"](name, **kwargs): diff = {} current_options = __salt__["solrcloud.collection_get_options"](name, **kwargs) # Filter options that can be updated updatable_options = [ "maxShardsPerNode", "replicationFactor", "autoAddReplicas", "collection.configName", "rule", "snitch"] options = [k for k in six.iteritems(options) if k in updatable_options] for key, value in options: if key not in current_options or current_options[key] != value: diff[key] = value if not diff: ret['result'] = True ret['comment'] = 'Collection options are in desired state' return ret else: if __opts__['test']: ret['comment'] = 'Collection options "{0}" will be changed.'.format(name) ret['result'] = None else: __salt__['solrcloud.collection_set_options'](name, diff, **kwargs) ret['comment'] = 'Parameters were updated for collection "{0}".'.format(name) ret['result'] = True ret['changes'] = { 'old': salt.utils.json.dumps(current_options, sort_keys=True, indent=4, separators=(',', ': ')), 'new': salt.utils.json.dumps(options, sort_keys=True, indent=4, separators=(',', ': ')) } return ret else: new_changes = salt.utils.json.dumps(options, sort_keys=True, indent=4, separators=(',', ': ')) if __opts__['test']: ret['comment'] = 'The collection "{0}" will be created.'.format(name) ret['result'] = None 
else: __salt__["solrcloud.collection_create"](name, options, **kwargs) ret['comment'] = 'The collection "{0}" has been created.'.format(name) ret['result'] = True ret['changes'] = { 'old': None, 'new': str('options=') + new_changes # future lint: disable=blacklisted-function } return ret
saltstack/salt
salt/states/solrcloud.py
collection
python
def collection(name, options=None, **kwargs): ''' Create collection and enforce options. Use the solrcloud module to get collection parameters. You can pass additional arguments that will be forwarded to http.query name The collection name options : {} options to ensure ''' ret = { 'name': name, 'changes': {}, 'result': False, 'comment': '', } if options is None: options = {} if __salt__["solrcloud.collection_exists"](name, **kwargs): diff = {} current_options = __salt__["solrcloud.collection_get_options"](name, **kwargs) # Filter options that can be updated updatable_options = [ "maxShardsPerNode", "replicationFactor", "autoAddReplicas", "collection.configName", "rule", "snitch"] options = [k for k in six.iteritems(options) if k in updatable_options] for key, value in options: if key not in current_options or current_options[key] != value: diff[key] = value if not diff: ret['result'] = True ret['comment'] = 'Collection options are in desired state' return ret else: if __opts__['test']: ret['comment'] = 'Collection options "{0}" will be changed.'.format(name) ret['result'] = None else: __salt__['solrcloud.collection_set_options'](name, diff, **kwargs) ret['comment'] = 'Parameters were updated for collection "{0}".'.format(name) ret['result'] = True ret['changes'] = { 'old': salt.utils.json.dumps(current_options, sort_keys=True, indent=4, separators=(',', ': ')), 'new': salt.utils.json.dumps(options, sort_keys=True, indent=4, separators=(',', ': ')) } return ret else: new_changes = salt.utils.json.dumps(options, sort_keys=True, indent=4, separators=(',', ': ')) if __opts__['test']: ret['comment'] = 'The collection "{0}" will be created.'.format(name) ret['result'] = None else: __salt__["solrcloud.collection_create"](name, options, **kwargs) ret['comment'] = 'The collection "{0}" has been created.'.format(name) ret['result'] = True ret['changes'] = { 'old': None, 'new': str('options=') + new_changes # future lint: disable=blacklisted-function } return ret
Create collection and enforce options. Use the solrcloud module to get collection parameters. You can pass additional arguments that will be forwarded to http.query name The collection name options : {} options to ensure
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/solrcloud.py#L78-L159
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n", "def dumps(obj, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.dumps, and assumes that ensure_ascii is False (unless explicitly\n passed as True) for unicode compatibility. Note that setting it to True\n will mess up any unicode characters, as they will be dumped as the string\n literal version of the unicode code point.\n\n On Python 2, encodes the result to a str since json.dumps does not want\n unicode types.\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n orig_enc_func = kwargs.pop('default', lambda x: x)\n\n def _enc_func(obj):\n obj = ThreadLocalProxy.unproxy(obj)\n return orig_enc_func(obj)\n\n if 'ensure_ascii' not in kwargs:\n kwargs['ensure_ascii'] = False\n if six.PY2:\n obj = salt.utils.data.encode(obj)\n return json_module.dumps(obj, default=_enc_func, **kwargs) # future lint: blacklisted-function\n" ]
# -*- coding: utf-8 -*- ''' States for solrcloud alias and collection configuration .. versionadded:: 2017.7.0 ''' # Import Python libs from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.json # Import 3rd party libs from salt.ext import six def alias(name, collections, **kwargs): ''' Create alias and enforce collection list. Use the solrcloud module to get alias members and set them. You can pass additional arguments that will be forwarded to http.query name The collection name collections list of collections to include in the alias ''' ret = { 'name': name, 'changes': {}, 'result': False, 'comment': '', } if __salt__['solrcloud.alias_exists'](name, **kwargs): alias_content = __salt__['solrcloud.alias_get_collections'](name, **kwargs) diff = set(alias_content).difference(set(collections)) if not diff: ret['result'] = True ret['comment'] = 'Alias is in desired state' return ret if __opts__['test']: ret['comment'] = 'The alias "{0}" will be updated.'.format(name) ret['result'] = None else: __salt__['solrcloud.alias_set_collections'](name, collections, **kwargs) ret['comment'] = 'The alias "{0}" has been updated.'.format(name) ret['result'] = True ret['changes'] = { 'old': ','.join(alias_content), 'new': ','.join(collections), } else: if __opts__['test']: ret['comment'] = 'The alias "{0}" will be created.'.format(name) ret['result'] = None else: __salt__['solrcloud.alias_set_collections'](name, collections, **kwargs) ret['comment'] = 'The alias "{0}" has been created.'.format(name) ret['result'] = True ret['changes'] = { 'old': None, 'new': ','.join(collections), } return ret
saltstack/salt
salt/modules/consul.py
_query
python
def _query(function, consul_url, token=None, method='GET', api_version='v1', data=None, query_params=None): ''' Consul object method function to construct and execute on the API URL. :param api_url: The Consul api url. :param api_version The Consul api version :param function: The Consul api function to perform. :param method: The HTTP method, e.g. GET or POST. :param data: The data to be sent for POST method. This param is ignored for GET requests. :return: The json response from the API call or False. ''' if not query_params: query_params = {} ret = {'data': '', 'res': True} if not token: token = _get_token() headers = {"X-Consul-Token": token, "Content-Type": "application/json"} base_url = urllib.parse.urljoin(consul_url, '{0}/'.format(api_version)) url = urllib.parse.urljoin(base_url, function, False) if method == 'GET': data = None else: if data is None: data = {} data = salt.utils.json.dumps(data) result = salt.utils.http.query( url, method=method, params=query_params, data=data, decode=True, status=True, header_dict=headers, opts=__opts__, ) if result.get('status', None) == http_client.OK: ret['data'] = result.get('dict', result) ret['res'] = True elif result.get('status', None) == http_client.NO_CONTENT: ret['res'] = False elif result.get('status', None) == http_client.NOT_FOUND: ret['data'] = 'Key not found.' ret['res'] = False else: if result: ret['data'] = result ret['res'] = True else: ret['res'] = False return ret
Consul object method function to construct and execute on the API URL. :param api_url: The Consul api url. :param api_version The Consul api version :param function: The Consul api function to perform. :param method: The HTTP method, e.g. GET or POST. :param data: The data to be sent for POST method. This param is ignored for GET requests. :return: The json response from the API call or False.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/consul.py#L50-L113
[ "def dumps(obj, **kwargs):\n '''\n .. versionadded:: 2018.3.0\n\n Wraps json.dumps, and assumes that ensure_ascii is False (unless explicitly\n passed as True) for unicode compatibility. Note that setting it to True\n will mess up any unicode characters, as they will be dumped as the string\n literal version of the unicode code point.\n\n On Python 2, encodes the result to a str since json.dumps does not want\n unicode types.\n\n You can pass an alternate json module (loaded via import_json() above)\n using the _json_module argument)\n '''\n json_module = kwargs.pop('_json_module', json)\n orig_enc_func = kwargs.pop('default', lambda x: x)\n\n def _enc_func(obj):\n obj = ThreadLocalProxy.unproxy(obj)\n return orig_enc_func(obj)\n\n if 'ensure_ascii' not in kwargs:\n kwargs['ensure_ascii'] = False\n if six.PY2:\n obj = salt.utils.data.encode(obj)\n return json_module.dumps(obj, default=_enc_func, **kwargs) # future lint: blacklisted-function\n", "def _get_token():\n '''\n Retrieve Consul configuration\n '''\n return __salt__['config.get']('consul.token') or \\\n __salt__['config.get']('consul:token')\n" ]
# -*- coding: utf-8 -*- ''' Interact with Consul https://www.consul.io ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import base64 import logging # Import salt libs import salt.utils.http import salt.utils.json # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import http_client, urllib log = logging.getLogger(__name__) from salt.exceptions import SaltInvocationError # Don't shadow built-ins. __func_alias__ = { 'list_': 'list' } __virtualname__ = 'consul' def _get_config(): ''' Retrieve Consul configuration ''' return __salt__['config.get']('consul.url') or \ __salt__['config.get']('consul:url') def _get_token(): ''' Retrieve Consul configuration ''' return __salt__['config.get']('consul.token') or \ __salt__['config.get']('consul:token') def list_(consul_url=None, token=None, key=None, **kwargs): ''' List keys in Consul :param consul_url: The Consul server URL. :param key: The key to use as the starting point for the list. :return: The list of keys. CLI Example: .. code-block:: bash salt '*' consul.list salt '*' consul.list key='web' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret query_params = {} if 'recurse' in kwargs: query_params['recurse'] = 'True' # No key so recurse and show all values if not key: query_params['recurse'] = 'True' function = 'kv/' else: function = 'kv/{0}'.format(key) query_params['keys'] = 'True' query_params['separator'] = '/' ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def get(consul_url=None, key=None, token=None, recurse=False, decode=False, raw=False): ''' Get key from Consul :param consul_url: The Consul server URL. :param key: The key to use as the starting point for the list. :param recurse: Return values recursively beginning at the value of key. 
:param decode: By default values are stored as Base64 encoded values, decode will return the whole key with the value decoded. :param raw: Simply return the decoded value of the key. :return: The keys in Consul. CLI Example: .. code-block:: bash salt '*' consul.get key='web/key1' salt '*' consul.get key='web' recurse=True salt '*' consul.get key='web' recurse=True decode=True By default values stored in Consul are base64 encoded, passing the decode option will show them as the decoded values. .. code-block:: bash salt '*' consul.get key='web' recurse=True decode=True raw=True By default Consult will return other information about the key, the raw option will return only the raw value. ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not key: raise SaltInvocationError('Required argument "key" is missing.') query_params = {} function = 'kv/{0}'.format(key) if recurse: query_params['recurse'] = 'True' if raw: query_params['raw'] = True ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) if ret['res']: if decode: for item in ret['data']: if item['Value'] is None: item['Value'] = "" else: item['Value'] = base64.b64decode(item['Value']) return ret def put(consul_url=None, token=None, key=None, value=None, **kwargs): ''' Put values into Consul :param consul_url: The Consul server URL. :param key: The key to use as the starting point for the list. :param value: The value to set the key to. :param flags: This can be used to specify an unsigned value between 0 and 2^64-1. Clients can choose to use this however makes sense for their application. :param cas: This flag is used to turn the PUT into a Check-And-Set operation. :param acquire: This flag is used to turn the PUT into a lock acquisition operation. :param release: This flag is used to turn the PUT into a lock release operation. 
:return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.put key='web/key1' value="Hello there" salt '*' consul.put key='web/key1' value="Hello there" acquire='d5d371f4-c380-5280-12fd-8810be175592' salt '*' consul.put key='web/key1' value="Hello there" release='d5d371f4-c380-5280-12fd-8810be175592' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not key: raise SaltInvocationError('Required argument "key" is missing.') # Invalid to specified these together conflicting_args = ['cas', 'release', 'acquire'] for _l1 in conflicting_args: for _l2 in conflicting_args: if _l1 in kwargs and _l2 in kwargs and _l1 != _l2: raise SaltInvocationError('Using arguments `{0}` and `{1}`' ' together is invalid.'.format(_l1, _l2)) query_params = {} available_sessions = session_list(consul_url=consul_url, return_list=True) _current = get(consul_url=consul_url, key=key) if 'flags' in kwargs: if kwargs['flags'] >= 0 and kwargs['flags'] <= 2**64: query_params['flags'] = kwargs['flags'] if 'cas' in kwargs: if _current['res']: if kwargs['cas'] == 0: ret['message'] = ('Key {0} exists, index ' 'must be non-zero.'.format(key)) ret['res'] = False return ret if kwargs['cas'] != _current['data']['ModifyIndex']: ret['message'] = ('Key {0} exists, but indexes ' 'do not match.'.format(key)) ret['res'] = False return ret query_params['cas'] = kwargs['cas'] else: ret['message'] = ('Key {0} does not exists, ' 'CAS argument can not be used.'.format(key)) ret['res'] = False return ret if 'acquire' in kwargs: if kwargs['acquire'] not in available_sessions: ret['message'] = '{0} is not a valid session.'.format(kwargs['acquire']) ret['res'] = False return ret query_params['acquire'] = kwargs['acquire'] if 'release' in kwargs: if _current['res']: if 'Session' in _current['data']: if _current['data']['Session'] == kwargs['release']: 
query_params['release'] = kwargs['release'] else: ret['message'] = '{0} locked by another session.'.format(key) ret['res'] = False return ret else: ret['message'] = '{0} is not a valid session.'.format(kwargs['acquire']) ret['res'] = False else: log.error('Key {0} does not exist. Skipping release.') data = value function = 'kv/{0}'.format(key) method = 'PUT' ret = _query(consul_url=consul_url, token=token, function=function, method=method, data=data, query_params=query_params) if ret['res']: ret['res'] = True ret['data'] = 'Added key {0} with value {1}.'.format(key, value) else: ret['res'] = False ret['data'] = 'Unable to add key {0} with value {1}.'.format(key, value) return ret def delete(consul_url=None, token=None, key=None, **kwargs): ''' Delete values from Consul :param consul_url: The Consul server URL. :param key: The key to use as the starting point for the list. :param recurse: Delete values recursively beginning at the value of key. :param cas: This flag is used to turn the DELETE into a Check-And-Set operation. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.delete key='web' salt '*' consul.delete key='web' recurse='True' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if not key: raise SaltInvocationError('Required argument "key" is missing.') query_params = {} if 'recurse' in kwargs: query_params['recurse'] = True if 'cas' in kwargs: if kwargs['cas'] > 0: query_params['cas'] = kwargs['cas'] else: ret['message'] = ('Check and Set Operation ', 'value must be greater than 0.') ret['res'] = False return ret function = 'kv/{0}'.format(key) ret = _query(consul_url=consul_url, token=token, function=function, method='DELETE', query_params=query_params) if ret['res']: ret['res'] = True ret['message'] = 'Deleted key {0}.'.format(key) else: ret['res'] = False ret['message'] = 'Unable to delete key {0}.'.format(key) return ret def agent_checks(consul_url=None, token=None): ''' Returns the checks the local agent is managing :param consul_url: The Consul server URL. :return: Returns the checks the local agent is managing CLI Example: .. code-block:: bash salt '*' consul.agent_checks ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'agent/checks' ret = _query(consul_url=consul_url, function=function, token=token, method='GET') return ret def agent_services(consul_url=None, token=None): ''' Returns the services the local agent is managing :param consul_url: The Consul server URL. :return: Returns the services the local agent is managing CLI Example: .. code-block:: bash salt '*' consul.agent_services ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'agent/services' ret = _query(consul_url=consul_url, function=function, token=token, method='GET') return ret def agent_members(consul_url=None, token=None, **kwargs): ''' Returns the members as seen by the local serf agent :param consul_url: The Consul server URL. 
:return: Returns the members as seen by the local serf agent CLI Example: .. code-block:: bash salt '*' consul.agent_members ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'wan' in kwargs: query_params['wan'] = kwargs['wan'] function = 'agent/members' ret = _query(consul_url=consul_url, function=function, token=token, method='GET', query_params=query_params) return ret def agent_self(consul_url=None, token=None): ''' Returns the local node configuration :param consul_url: The Consul server URL. :return: Returns the local node configuration CLI Example: .. code-block:: bash salt '*' consul.agent_self ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'agent/self' ret = _query(consul_url=consul_url, function=function, token=token, method='GET', query_params=query_params) return ret def agent_maintenance(consul_url=None, token=None, **kwargs): ''' Manages node maintenance mode :param consul_url: The Consul server URL. :param enable: The enable flag is required. Acceptable values are either true (to enter maintenance mode) or false (to resume normal operation). :param reason: If provided, its value should be a text string explaining the reason for placing the node into maintenance mode. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_maintenance enable='False' reason='Upgrade in progress' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if 'enable' in kwargs: query_params['enable'] = kwargs['enable'] else: ret['message'] = 'Required parameter "enable" is missing.' ret['res'] = False return ret if 'reason' in kwargs: query_params['reason'] = kwargs['reason'] function = 'agent/maintenance' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', query_params=query_params) if res['res']: ret['res'] = True ret['message'] = ('Agent maintenance mode ' '{0}ed.'.format(kwargs['enable'])) else: ret['res'] = True ret['message'] = 'Unable to change maintenance mode for agent.' return ret def agent_join(consul_url=None, token=None, address=None, **kwargs): ''' Triggers the local agent to join a node :param consul_url: The Consul server URL. :param address: The address for the agent to connect to. :param wan: Causes the agent to attempt to join using the WAN pool. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_join address='192.168.1.1' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not address: raise SaltInvocationError('Required argument "address" is missing.') if 'wan' in kwargs: query_params['wan'] = kwargs['wan'] function = 'agent/join/{0}'.format(address) res = _query(consul_url=consul_url, function=function, token=token, method='GET', query_params=query_params) if res['res']: ret['res'] = True ret['message'] = 'Agent joined the cluster' else: ret['res'] = False ret['message'] = 'Unable to join the cluster.' return ret def agent_leave(consul_url=None, token=None, node=None): ''' Used to instruct the agent to force a node into the left state. :param consul_url: The Consul server URL. :param node: The node the agent will force into left state :return: Boolean and message indicating success or failure. CLI Example: .. 
code-block:: bash salt '*' consul.agent_leave node='web1.example.com' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not node: raise SaltInvocationError('Required argument "node" is missing.') function = 'agent/force-leave/{0}'.format(node) res = _query(consul_url=consul_url, function=function, token=token, method='GET', query_params=query_params) if res['res']: ret['res'] = True ret['message'] = 'Node {0} put in leave state.'.format(node) else: ret['res'] = False ret['message'] = 'Unable to change state for {0}.'.format(node) return ret def agent_check_register(consul_url=None, token=None, **kwargs): ''' The register endpoint is used to add a new check to the local agent. :param consul_url: The Consul server URL. :param name: The description of what the check is for. :param id: The unique name to use for the check, if not provided 'name' is used. :param notes: Human readable description of the check. :param script: If script is provided, the check type is a script, and Consul will evaluate that script based on the interval parameter. :param http: Check will perform an HTTP GET request against the value of HTTP (expected to be a URL) based on the interval parameter. :param ttl: If a TTL type is used, then the TTL update endpoint must be used periodically to update the state of the check. :param interval: Interval at which the check should run. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_check_register name='Memory Utilization' script='/usr/local/bin/check_mem.py' interval='15s' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if 'name' in kwargs: data['Name'] = kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') if True not in [True for item in ('script', 'http', 'ttl') if item in kwargs]: ret['message'] = 'Required parameter "script" or "http" is missing.' ret['res'] = False return ret if 'id' in kwargs: data['ID'] = kwargs['id'] if 'notes' in kwargs: data['Notes'] = kwargs['notes'] if 'script' in kwargs: if 'interval' not in kwargs: ret['message'] = 'Required parameter "interval" is missing.' ret['res'] = False return ret data['Script'] = kwargs['script'] data['Interval'] = kwargs['interval'] if 'http' in kwargs: if 'interval' not in kwargs: ret['message'] = 'Required parameter "interval" is missing.' ret['res'] = False return ret data['HTTP'] = kwargs['http'] data['Interval'] = kwargs['interval'] if 'ttl' in kwargs: data['TTL'] = kwargs['ttl'] function = 'agent/check/register' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = ('Check {0} added to agent.'.format(kwargs['name'])) else: ret['res'] = False ret['message'] = 'Unable to add check to agent.' return ret def agent_check_deregister(consul_url=None, token=None, checkid=None): ''' The agent will take care of deregistering the check from the Catalog. :param consul_url: The Consul server URL. :param checkid: The ID of the check to deregister from Consul. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_check_deregister checkid='Memory Utilization' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if not checkid: raise SaltInvocationError('Required argument "checkid" is missing.') function = 'agent/check/deregister/{0}'.format(checkid) res = _query(consul_url=consul_url, function=function, token=token, method='GET') if res['res']: ret['res'] = True ret['message'] = ('Check {0} removed from agent.'.format(checkid)) else: ret['res'] = False ret['message'] = 'Unable to remove check from agent.' return ret def agent_check_pass(consul_url=None, token=None, checkid=None, **kwargs): ''' This endpoint is used with a check that is of the TTL type. When this is called, the status of the check is set to passing and the TTL clock is reset. :param consul_url: The Consul server URL. :param checkid: The ID of the check to mark as passing. :param note: A human-readable message with the status of the check. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_check_pass checkid='redis_check1' note='Forcing check into passing state.' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not checkid: raise SaltInvocationError('Required argument "checkid" is missing.') if 'note' in kwargs: query_params['note'] = kwargs['note'] function = 'agent/check/pass/{0}'.format(checkid) res = _query(consul_url=consul_url, function=function, token=token, query_params=query_params, method='GET') if res['res']: ret['res'] = True ret['message'] = 'Check {0} marked as passing.'.format(checkid) else: ret['res'] = False ret['message'] = 'Unable to update check {0}.'.format(checkid) return ret def agent_check_warn(consul_url=None, token=None, checkid=None, **kwargs): ''' This endpoint is used with a check that is of the TTL type. When this is called, the status of the check is set to warning and the TTL clock is reset. 
:param consul_url: The Consul server URL. :param checkid: The ID of the check to deregister from Consul. :param note: A human-readable message with the status of the check. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_check_warn checkid='redis_check1' note='Forcing check into warning state.' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not checkid: raise SaltInvocationError('Required argument "checkid" is missing.') if 'note' in kwargs: query_params['note'] = kwargs['note'] function = 'agent/check/warn/{0}'.format(checkid) res = _query(consul_url=consul_url, function=function, token=token, query_params=query_params, method='GET') if res['res']: ret['res'] = True ret['message'] = 'Check {0} marked as warning.'.format(checkid) else: ret['res'] = False ret['message'] = 'Unable to update check {0}.'.format(checkid) return ret def agent_check_fail(consul_url=None, token=None, checkid=None, **kwargs): ''' This endpoint is used with a check that is of the TTL type. When this is called, the status of the check is set to critical and the TTL clock is reset. :param consul_url: The Consul server URL. :param checkid: The ID of the check to deregister from Consul. :param note: A human-readable message with the status of the check. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_check_fail checkid='redis_check1' note='Forcing check into critical state.' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if not checkid: raise SaltInvocationError('Required argument "checkid" is missing.') if 'note' in kwargs: query_params['note'] = kwargs['note'] function = 'agent/check/fail/{0}'.format(checkid) res = _query(consul_url=consul_url, function=function, token=token, query_params=query_params, method='GET') if res['res']: ret['res'] = True ret['message'] = 'Check {0} marked as critical.'.format(checkid) else: ret['res'] = False ret['message'] = 'Unable to update check {0}.'.format(checkid) return ret def agent_service_register(consul_url=None, token=None, **kwargs): ''' The used to add a new service, with an optional health check, to the local agent. :param consul_url: The Consul server URL. :param name: A name describing the service. :param address: The address used by the service, defaults to the address of the agent. :param port: The port used by the service. :param id: Unique ID to identify the service, if not provided the value of the name parameter is used. :param tags: Identifying tags for service, string or list. :param script: If script is provided, the check type is a script, and Consul will evaluate that script based on the interval parameter. :param http: Check will perform an HTTP GET request against the value of HTTP (expected to be a URL) based on the interval parameter. :param check_ttl: If a TTL type is used, then the TTL update endpoint must be used periodically to update the state of the check. :param check_interval: Interval at which the check should run. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_service_register name='redis' tags='["master", "v1"]' address="127.0.0.1" port="8080" check_script="/usr/local/bin/check_redis.py" interval="10s" ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret lc_kwargs = dict() for k, v in six.iteritems(kwargs): lc_kwargs[k.lower()] = v if 'name' in lc_kwargs: data['Name'] = lc_kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') if 'address' in lc_kwargs: data['Address'] = lc_kwargs['address'] if 'port' in lc_kwargs: data['Port'] = lc_kwargs['port'] if 'id' in lc_kwargs: data['ID'] = lc_kwargs['id'] if 'tags' in lc_kwargs: _tags = lc_kwargs['tags'] if not isinstance(_tags, list): _tags = [_tags] data['Tags'] = _tags if 'enabletagoverride' in lc_kwargs: data['EnableTagOverride'] = lc_kwargs['enabletagoverride'] if 'check' in lc_kwargs: dd = dict() for k, v in six.iteritems(lc_kwargs['check']): dd[k.lower()] = v interval_required = False check_dd = dict() if 'script' in dd: interval_required = True check_dd['Script'] = dd['script'] if 'http' in dd: interval_required = True check_dd['HTTP'] = dd['http'] if 'ttl' in dd: check_dd['TTL'] = dd['ttl'] if 'interval' in dd: check_dd['Interval'] = dd['interval'] if interval_required: if 'Interval' not in check_dd: ret['message'] = 'Required parameter "interval" is missing.' ret['res'] = False return ret else: if 'Interval' in check_dd: del check_dd['Interval'] # not required, so ignore it if check_dd > 0: data['Check'] = check_dd # if empty, ignore it function = 'agent/service/register' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = 'Service {0} registered on agent.'.format(kwargs['name']) else: ret['res'] = False ret['message'] = 'Unable to register service {0}.'.format(kwargs['name']) return ret def agent_service_deregister(consul_url=None, token=None, serviceid=None): ''' Used to remove a service. :param consul_url: The Consul server URL. :param serviceid: A serviceid describing the service. :return: Boolean and message indicating success or failure. CLI Example: .. 
code-block:: bash salt '*' consul.agent_service_deregister serviceid='redis' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not serviceid: raise SaltInvocationError('Required argument "serviceid" is missing.') function = 'agent/service/deregister/{0}'.format(serviceid) res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = 'Service {0} removed from agent.'.format(serviceid) else: ret['res'] = False ret['message'] = 'Unable to remove service {0}.'.format(serviceid) return ret def agent_service_maintenance(consul_url=None, token=None, serviceid=None, **kwargs): ''' Used to place a service into maintenance mode. :param consul_url: The Consul server URL. :param serviceid: A name of the service. :param enable: Whether the service should be enabled or disabled. :param reason: A human readable message of why the service was enabled or disabled. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_service_deregister serviceid='redis' enable='True' reason='Down for upgrade' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not serviceid: raise SaltInvocationError('Required argument "serviceid" is missing.') if 'enable' in kwargs: query_params['enable'] = kwargs['enable'] else: ret['message'] = 'Required parameter "enable" is missing.' 
ret['res'] = False return ret if 'reason' in kwargs: query_params['reason'] = kwargs['reason'] function = 'agent/service/maintenance/{0}'.format(serviceid) res = _query(consul_url=consul_url, token=token, function=function, query_params=query_params) if res['res']: ret['res'] = True ret['message'] = ('Service {0} set in ' 'maintenance mode.'.format(serviceid)) else: ret['res'] = False ret['message'] = ('Unable to set service ' '{0} to maintenance mode.'.format(serviceid)) return ret def session_create(consul_url=None, token=None, **kwargs): ''' Used to create a session. :param consul_url: The Consul server URL. :param lockdelay: Duration string using a "s" suffix for seconds. The default is 15s. :param node: Must refer to a node that is already registered, if specified. By default, the agent's own node name is used. :param name: A human-readable name for the session :param checks: A list of associated health checks. It is highly recommended that, if you override this list, you include the default "serfHealth". :param behavior: Can be set to either release or delete. This controls the behavior when a session is invalidated. By default, this is release, causing any locks that are held to be released. Changing this to delete causes any locks that are held to be deleted. delete is useful for creating ephemeral key/value entries. :param ttl: Session is invalidated if it is not renewed before the TTL expires :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.session_create node='node1' name='my-session' behavior='delete' ttl='3600s' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret data = {} if 'lockdelay' in kwargs: data['LockDelay'] = kwargs['lockdelay'] if 'node' in kwargs: data['Node'] = kwargs['node'] if 'name' in kwargs: data['Name'] = kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') if 'checks' in kwargs: data['Touch'] = kwargs['touch'] if 'behavior' in kwargs: if not kwargs['behavior'] in ('delete', 'release'): ret['message'] = ('Behavior must be ', 'either delete or release.') ret['res'] = False return ret data['Behavior'] = kwargs['behavior'] if 'ttl' in kwargs: _ttl = kwargs['ttl'] if six.text_type(_ttl).endswith('s'): _ttl = _ttl[:-1] if int(_ttl) < 0 or int(_ttl) > 3600: ret['message'] = ('TTL must be ', 'between 0 and 3600.') ret['res'] = False return ret data['TTL'] = '{0}s'.format(_ttl) function = 'session/create' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = 'Created session {0}.'.format(kwargs['name']) else: ret['res'] = False ret['message'] = 'Unable to create session {0}.'.format(kwargs['name']) return ret def session_list(consul_url=None, token=None, return_list=False, **kwargs): ''' Used to list sessions. :param consul_url: The Consul server URL. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param return_list: By default, all information about the sessions is returned, using the return_list parameter will return a list of session IDs. :return: A list of all available sessions. CLI Example: .. code-block:: bash salt '*' consul.session_list ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret query_params = {} if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'session/list' ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) if return_list: _list = [] for item in ret['data']: _list.append(item['ID']) return _list return ret def session_destroy(consul_url=None, token=None, session=None, **kwargs): ''' Destroy session :param consul_url: The Consul server URL. :param session: The ID of the session to destroy. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.session_destroy session='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not session: raise SaltInvocationError('Required argument "session" is missing.') query_params = {} if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'session/destroy/{0}'.format(session) res = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) if res['res']: ret['res'] = True ret['message'] = 'Created Service {0}.'.format(kwargs['name']) else: ret['res'] = False ret['message'] = 'Unable to create service {0}.'.format(kwargs['name']) return ret def session_info(consul_url=None, token=None, session=None, **kwargs): ''' Information about a session :param consul_url: The Consul server URL. :param session: The ID of the session to return information about. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: Boolean & message of success or failure. CLI Example: .. 
code-block:: bash salt '*' consul.session_info session='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not session: raise SaltInvocationError('Required argument "session" is missing.') query_params = {} if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'session/info/{0}'.format(session) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def catalog_register(consul_url=None, token=None, **kwargs): ''' Registers a new node, service, or check :param consul_url: The Consul server URL. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param node: The node to register. :param address: The address of the node. :param service: The service that will be registered. :param service_address: The address that the service listens on. :param service_port: The port for the service. :param service_id: A unique identifier for the service, if this is not provided "name" will be used. :param service_tags: Any tags associated with the service. :param check: The name of the health check to register :param check_status: The initial status of the check, must be one of unknown, passing, warning, or critical. :param check_service: The service that the check is performed against. :param check_id: Unique identifier for the service. :param check_notes: An opaque field that is meant to hold human-readable text. :return: Boolean & message of success or failure. CLI Example: .. 
code-block:: bash salt '*' consul.catalog_register node='node1' address='192.168.1.1' service='redis' service_address='127.0.0.1' service_port='8080' service_id='redis_server1' ''' ret = {} data = {} data['NodeMeta'] = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'datacenter' in kwargs: data['Datacenter'] = kwargs['datacenter'] if 'node' in kwargs: data['Node'] = kwargs['node'] else: ret['message'] = 'Required argument node argument is missing.' ret['res'] = False return ret if 'address' in kwargs: if isinstance(kwargs['address'], list): _address = kwargs['address'][0] else: _address = kwargs['address'] data['Address'] = _address else: ret['message'] = 'Required argument address argument is missing.' ret['res'] = False return ret if 'ip_interfaces' in kwargs: data['TaggedAddresses'] = {} for k in kwargs['ip_interfaces']: if kwargs['ip_interfaces'].get(k): data['TaggedAddresses'][k] = kwargs['ip_interfaces'][k][0] if 'service' in kwargs: data['Service'] = {} data['Service']['Service'] = kwargs['service'] if 'service_address' in kwargs: data['Service']['Address'] = kwargs['service_address'] if 'service_port' in kwargs: data['Service']['Port'] = kwargs['service_port'] if 'service_id' in kwargs: data['Service']['ID'] = kwargs['service_id'] if 'service_tags' in kwargs: _tags = kwargs['service_tags'] if not isinstance(_tags, list): _tags = [_tags] data['Service']['Tags'] = _tags if 'cpu' in kwargs: data['NodeMeta']['Cpu'] = kwargs['cpu'] if 'num_cpus' in kwargs: data['NodeMeta']['Cpu_num'] = kwargs['num_cpus'] if 'mem' in kwargs: data['NodeMeta']['Memory'] = kwargs['mem'] if 'oscode' in kwargs: data['NodeMeta']['Os'] = kwargs['oscode'] if 'osarch' in kwargs: data['NodeMeta']['Osarch'] = kwargs['osarch'] if 'kernel' in kwargs: data['NodeMeta']['Kernel'] = kwargs['kernel'] if 'kernelrelease' in kwargs: data['NodeMeta']['Kernelrelease'] = 
kwargs['kernelrelease'] if 'localhost' in kwargs: data['NodeMeta']['localhost'] = kwargs['localhost'] if 'nodename' in kwargs: data['NodeMeta']['nodename'] = kwargs['nodename'] if 'os_family' in kwargs: data['NodeMeta']['os_family'] = kwargs['os_family'] if 'lsb_distrib_description' in kwargs: data['NodeMeta']['lsb_distrib_description'] = kwargs['lsb_distrib_description'] if 'master' in kwargs: data['NodeMeta']['master'] = kwargs['master'] if 'check' in kwargs: data['Check'] = {} data['Check']['Name'] = kwargs['check'] if 'check_status' in kwargs: if kwargs['check_status'] not in ('unknown', 'passing', 'warning', 'critical'): ret['message'] = 'Check status must be unknown, passing, warning, or critical.' ret['res'] = False return ret data['Check']['Status'] = kwargs['check_status'] if 'check_service' in kwargs: data['Check']['ServiceID'] = kwargs['check_service'] if 'check_id' in kwargs: data['Check']['CheckID'] = kwargs['check_id'] if 'check_notes' in kwargs: data['Check']['Notes'] = kwargs['check_notes'] function = 'catalog/register' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = ('Catalog registration ' 'for {0} successful.'.format(kwargs['node'])) else: ret['res'] = False ret['message'] = ('Catalog registration ' 'for {0} failed.'.format(kwargs['node'])) ret['data'] = data return ret def catalog_deregister(consul_url=None, token=None, **kwargs): ''' Deregisters a node, service, or check :param consul_url: The Consul server URL. :param node: The node to deregister. :param datacenter: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param checkid: The ID of the health check to deregister. :param serviceid: The ID of the service to deregister. :return: Boolean & message of success or failure. CLI Example: .. 
code-block:: bash salt '*' consul.catalog_register node='node1' serviceid='redis_server1' checkid='redis_check1' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'datacenter' in kwargs: data['Datacenter'] = kwargs['datacenter'] if 'node' in kwargs: data['Node'] = kwargs['node'] else: ret['message'] = 'Node argument required.' ret['res'] = False return ret if 'checkid' in kwargs: data['CheckID'] = kwargs['checkid'] if 'serviceid' in kwargs: data['ServiceID'] = kwargs['serviceid'] function = 'catalog/deregister' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = 'Catalog item {0} removed.'.format(kwargs['node']) else: ret['res'] = False ret['message'] = ('Removing Catalog ' 'item {0} failed.'.format(kwargs['node'])) return ret def catalog_datacenters(consul_url=None, token=None): ''' Return list of available datacenters from catalog. :param consul_url: The Consul server URL. :return: The list of available datacenters. CLI Example: .. code-block:: bash salt '*' consul.catalog_datacenters ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'catalog/datacenters' ret = _query(consul_url=consul_url, function=function, token=token) return ret def catalog_nodes(consul_url=None, token=None, **kwargs): ''' Return list of available nodes from catalog. :param consul_url: The Consul server URL. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: The list of available nodes. CLI Example: .. 
code-block:: bash salt '*' consul.catalog_nodes ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'catalog/nodes' ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def catalog_services(consul_url=None, token=None, **kwargs): ''' Return list of available services rom catalog. :param consul_url: The Consul server URL. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: The list of available services. CLI Example: .. code-block:: bash salt '*' consul.catalog_services ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'catalog/services' ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def catalog_service(consul_url=None, token=None, service=None, **kwargs): ''' Information about the registered service. :param consul_url: The Consul server URL. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param tag: Filter returned services with tag parameter. :return: Information about the requested service. CLI Example: .. code-block:: bash salt '*' consul.catalog_service service='redis' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if not service: raise SaltInvocationError('Required argument "service" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] if 'tag' in kwargs: query_params['tag'] = kwargs['tag'] function = 'catalog/service/{0}'.format(service) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def catalog_node(consul_url=None, token=None, node=None, **kwargs): ''' Information about the registered node. :param consul_url: The Consul server URL. :param node: The node to request information about. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: Information about the requested node. CLI Example: .. code-block:: bash salt '*' consul.catalog_service service='redis' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not node: raise SaltInvocationError('Required argument "node" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'catalog/node/{0}'.format(node) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def health_node(consul_url=None, token=None, node=None, **kwargs): ''' Health information about the registered node. :param consul_url: The Consul server URL. :param node: The node to request health information about. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: Health information about the requested node. CLI Example: .. code-block:: bash salt '*' consul.health_node node='node1' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if not node: raise SaltInvocationError('Required argument "node" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'health/node/{0}'.format(node) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def health_checks(consul_url=None, token=None, service=None, **kwargs): ''' Health information about the registered service. :param consul_url: The Consul server URL. :param service: The service to request health information about. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: Health information about the requested node. CLI Example: .. code-block:: bash salt '*' consul.health_checks service='redis1' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not service: raise SaltInvocationError('Required argument "service" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'health/checks/{0}'.format(service) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def health_service(consul_url=None, token=None, service=None, **kwargs): ''' Health information about the registered service. :param consul_url: The Consul server URL. :param service: The service to request health information about. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param tag: Filter returned services with tag parameter. :param passing: Filter results to only nodes with all checks in the passing state. :return: Health information about the requested node. CLI Example: .. 
code-block:: bash salt '*' consul.health_service service='redis1' salt '*' consul.health_service service='redis1' passing='True' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not service: raise SaltInvocationError('Required argument "service" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] if 'tag' in kwargs: query_params['tag'] = kwargs['tag'] if 'passing' in kwargs: query_params['passing'] = kwargs['passing'] function = 'health/service/{0}'.format(service) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def health_state(consul_url=None, token=None, state=None, **kwargs): ''' Returns the checks in the state provided on the path. :param consul_url: The Consul server URL. :param state: The state to show checks for. The supported states are any, unknown, passing, warning, or critical. The any state is a wildcard that can be used to return all checks. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: The checks in the provided state. CLI Example: .. code-block:: bash salt '*' consul.health_state state='redis1' salt '*' consul.health_state service='redis1' passing='True' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not state: raise SaltInvocationError('Required argument "state" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] if state not in ('any', 'unknown', 'passing', 'warning', 'critical'): ret['message'] = 'State must be any, unknown, passing, warning, or critical.' 
ret['res'] = False return ret function = 'health/state/{0}'.format(state) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def status_leader(consul_url=None, token=None): ''' Returns the current Raft leader :param consul_url: The Consul server URL. :return: The address of the Raft leader. CLI Example: .. code-block:: bash salt '*' consul.status_leader ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'status/leader' ret = _query(consul_url=consul_url, function=function, token=token) return ret def status_peers(consul_url, token=None): ''' Returns the current Raft peer set :param consul_url: The Consul server URL. :return: Retrieves the Raft peers for the datacenter in which the agent is running. CLI Example: .. code-block:: bash salt '*' consul.status_peers ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'status/peers' ret = _query(consul_url=consul_url, function=function, token=token) return ret def acl_create(consul_url=None, token=None, **kwargs): ''' Create a new ACL token. :param consul_url: The Consul server URL. :param name: Meaningful indicator of the ACL's purpose. :param type: Type is either client or management. A management token is comparable to a root user and has the ability to perform any action including creating, modifying, and deleting ACLs. :param rules: The Consul server URL. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.acl_create ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if 'name' in kwargs: data['Name'] = kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') if 'type' in kwargs: data['Type'] = kwargs['type'] if 'rules' in kwargs: data['Rules'] = kwargs['rules'] function = 'acl/create' res = _query(consul_url=consul_url, token=token, data=data, method='PUT', function=function) if res['res']: ret['res'] = True ret['message'] = 'ACL {0} created.'.format(kwargs['name']) else: ret['res'] = False ret['message'] = ('Removing Catalog ' 'item {0} failed.'.format(kwargs['name'])) return ret def acl_update(consul_url=None, token=None, **kwargs): ''' Update an ACL token. :param consul_url: The Consul server URL. :param name: Meaningful indicator of the ACL's purpose. :param id: Unique identifier for the ACL to update. :param type: Type is either client or management. A management token is comparable to a root user and has the ability to perform any action including creating, modifying, and deleting ACLs. :param rules: The Consul server URL. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.acl_update ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'id' in kwargs: data['ID'] = kwargs['id'] else: ret['message'] = 'Required parameter "id" is missing.' 
ret['res'] = False return ret if 'name' in kwargs: data['Name'] = kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') if 'type' in kwargs: data['Type'] = kwargs['type'] if 'rules' in kwargs: data['Rules'] = kwargs['rules'] function = 'acl/update' res = _query(consul_url=consul_url, token=token, data=data, method='PUT', function=function) if res['res']: ret['res'] = True ret['message'] = 'ACL {0} created.'.format(kwargs['name']) else: ret['res'] = False ret['message'] = ('Adding ACL ' '{0} failed.'.format(kwargs['name'])) return ret def acl_delete(consul_url=None, token=None, **kwargs): ''' Delete an ACL token. :param consul_url: The Consul server URL. :param id: Unique identifier for the ACL to update. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.acl_delete id='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'id' not in kwargs: ret['message'] = 'Required parameter "id" is missing.' ret['res'] = False return ret function = 'acl/delete/{0}'.format(kwargs['id']) res = _query(consul_url=consul_url, token=token, data=data, method='PUT', function=function) if res['res']: ret['res'] = True ret['message'] = 'ACL {0} deleted.'.format(kwargs['id']) else: ret['res'] = False ret['message'] = ('Removing ACL ' '{0} failed.'.format(kwargs['id'])) return ret def acl_info(consul_url=None, **kwargs): ''' Information about an ACL token. :param consul_url: The Consul server URL. :param id: Unique identifier for the ACL to update. :return: Information about the ACL requested. CLI Example: .. 
code-block:: bash salt '*' consul.acl_info id='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'id' not in kwargs: ret['message'] = 'Required parameter "id" is missing.' ret['res'] = False return ret function = 'acl/info/{0}'.format(kwargs['id']) ret = _query(consul_url=consul_url, data=data, method='GET', function=function) return ret def acl_clone(consul_url=None, token=None, **kwargs): ''' Information about an ACL token. :param consul_url: The Consul server URL. :param id: Unique identifier for the ACL to update. :return: Boolean, message of success or failure, and new ID of cloned ACL. CLI Example: .. code-block:: bash salt '*' consul.acl_info id='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'id' not in kwargs: ret['message'] = 'Required parameter "id" is missing.' ret['res'] = False return ret function = 'acl/clone/{0}'.format(kwargs['id']) res = _query(consul_url=consul_url, token=token, data=data, method='PUT', function=function) if res['res']: ret['res'] = True ret['message'] = 'ACL {0} cloned.'.format(kwargs['name']) ret['ID'] = ret['data'] else: ret['res'] = False ret['message'] = ('Cloning ACL' 'item {0} failed.'.format(kwargs['name'])) return ret def acl_list(consul_url=None, token=None, **kwargs): ''' List the ACL tokens. :param consul_url: The Consul server URL. :return: List of ACLs CLI Example: .. code-block:: bash salt '*' consul.acl_list ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if 'id' not in kwargs: ret['message'] = 'Required parameter "id" is missing.' ret['res'] = False return ret function = 'acl/list' ret = _query(consul_url=consul_url, token=token, data=data, method='PUT', function=function) return ret def event_fire(consul_url=None, token=None, name=None, **kwargs): ''' List the ACL tokens. :param consul_url: The Consul server URL. :param name: The name of the event to fire. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param node: Filter by node name. :param service: Filter by service name. :param tag: Filter by tag name. :return: List of ACLs CLI Example: .. code-block:: bash salt '*' consul.event_fire name='deploy' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not name: raise SaltInvocationError('Required argument "name" is missing.') if 'dc' in kwargs: query_params = kwargs['dc'] if 'node' in kwargs: query_params = kwargs['node'] if 'service' in kwargs: query_params = kwargs['service'] if 'tag' in kwargs: query_params = kwargs['tag'] function = 'event/fire/{0}'.format(name) res = _query(consul_url=consul_url, token=token, query_params=query_params, method='PUT', function=function) if res['res']: ret['res'] = True ret['message'] = 'Event {0} fired.'.format(name) ret['data'] = ret['data'] else: ret['res'] = False ret['message'] = ('Cloning ACL' 'item {0} failed.'.format(kwargs['name'])) return ret def event_list(consul_url=None, token=None, **kwargs): ''' List the recent events. :param consul_url: The Consul server URL. :param name: The name of the event to fire. :return: List of ACLs CLI Example: .. 
code-block:: bash salt '*' consul.event_list ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'name' in kwargs: query_params = kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') function = 'event/list/' ret = _query(consul_url=consul_url, token=token, query_params=query_params, function=function) return ret
saltstack/salt
salt/modules/consul.py
list_
python
def list_(consul_url=None, token=None, key=None, **kwargs): ''' List keys in Consul :param consul_url: The Consul server URL. :param key: The key to use as the starting point for the list. :return: The list of keys. CLI Example: .. code-block:: bash salt '*' consul.list salt '*' consul.list key='web' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret query_params = {} if 'recurse' in kwargs: query_params['recurse'] = 'True' # No key so recurse and show all values if not key: query_params['recurse'] = 'True' function = 'kv/' else: function = 'kv/{0}'.format(key) query_params['keys'] = 'True' query_params['separator'] = '/' ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret
List keys in Consul :param consul_url: The Consul server URL. :param key: The key to use as the starting point for the list. :return: The list of keys. CLI Example: .. code-block:: bash salt '*' consul.list salt '*' consul.list key='web'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/consul.py#L116-L160
[ "def _query(function,\n consul_url,\n token=None,\n method='GET',\n api_version='v1',\n data=None,\n query_params=None):\n '''\n Consul object method function to construct and execute on the API URL.\n\n :param api_url: The Consul api url.\n :param api_version The Consul api version\n :param function: The Consul api function to perform.\n :param method: The HTTP method, e.g. GET or POST.\n :param data: The data to be sent for POST method. This param is ignored for GET requests.\n :return: The json response from the API call or False.\n '''\n\n if not query_params:\n query_params = {}\n\n ret = {'data': '',\n 'res': True}\n\n if not token:\n token = _get_token()\n\n headers = {\"X-Consul-Token\": token, \"Content-Type\": \"application/json\"}\n base_url = urllib.parse.urljoin(consul_url, '{0}/'.format(api_version))\n url = urllib.parse.urljoin(base_url, function, False)\n\n if method == 'GET':\n data = None\n else:\n if data is None:\n data = {}\n data = salt.utils.json.dumps(data)\n\n result = salt.utils.http.query(\n url,\n method=method,\n params=query_params,\n data=data,\n decode=True,\n status=True,\n header_dict=headers,\n opts=__opts__,\n )\n\n if result.get('status', None) == http_client.OK:\n ret['data'] = result.get('dict', result)\n ret['res'] = True\n elif result.get('status', None) == http_client.NO_CONTENT:\n ret['res'] = False\n elif result.get('status', None) == http_client.NOT_FOUND:\n ret['data'] = 'Key not found.'\n ret['res'] = False\n else:\n if result:\n ret['data'] = result\n ret['res'] = True\n else:\n ret['res'] = False\n return ret\n", "def _get_config():\n '''\n Retrieve Consul configuration\n '''\n return __salt__['config.get']('consul.url') or \\\n __salt__['config.get']('consul:url')\n" ]
# -*- coding: utf-8 -*- ''' Interact with Consul https://www.consul.io ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import base64 import logging # Import salt libs import salt.utils.http import salt.utils.json # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import http_client, urllib log = logging.getLogger(__name__) from salt.exceptions import SaltInvocationError # Don't shadow built-ins. __func_alias__ = { 'list_': 'list' } __virtualname__ = 'consul' def _get_config(): ''' Retrieve Consul configuration ''' return __salt__['config.get']('consul.url') or \ __salt__['config.get']('consul:url') def _get_token(): ''' Retrieve Consul configuration ''' return __salt__['config.get']('consul.token') or \ __salt__['config.get']('consul:token') def _query(function, consul_url, token=None, method='GET', api_version='v1', data=None, query_params=None): ''' Consul object method function to construct and execute on the API URL. :param api_url: The Consul api url. :param api_version The Consul api version :param function: The Consul api function to perform. :param method: The HTTP method, e.g. GET or POST. :param data: The data to be sent for POST method. This param is ignored for GET requests. :return: The json response from the API call or False. 
''' if not query_params: query_params = {} ret = {'data': '', 'res': True} if not token: token = _get_token() headers = {"X-Consul-Token": token, "Content-Type": "application/json"} base_url = urllib.parse.urljoin(consul_url, '{0}/'.format(api_version)) url = urllib.parse.urljoin(base_url, function, False) if method == 'GET': data = None else: if data is None: data = {} data = salt.utils.json.dumps(data) result = salt.utils.http.query( url, method=method, params=query_params, data=data, decode=True, status=True, header_dict=headers, opts=__opts__, ) if result.get('status', None) == http_client.OK: ret['data'] = result.get('dict', result) ret['res'] = True elif result.get('status', None) == http_client.NO_CONTENT: ret['res'] = False elif result.get('status', None) == http_client.NOT_FOUND: ret['data'] = 'Key not found.' ret['res'] = False else: if result: ret['data'] = result ret['res'] = True else: ret['res'] = False return ret def get(consul_url=None, key=None, token=None, recurse=False, decode=False, raw=False): ''' Get key from Consul :param consul_url: The Consul server URL. :param key: The key to use as the starting point for the list. :param recurse: Return values recursively beginning at the value of key. :param decode: By default values are stored as Base64 encoded values, decode will return the whole key with the value decoded. :param raw: Simply return the decoded value of the key. :return: The keys in Consul. CLI Example: .. code-block:: bash salt '*' consul.get key='web/key1' salt '*' consul.get key='web' recurse=True salt '*' consul.get key='web' recurse=True decode=True By default values stored in Consul are base64 encoded, passing the decode option will show them as the decoded values. .. code-block:: bash salt '*' consul.get key='web' recurse=True decode=True raw=True By default Consult will return other information about the key, the raw option will return only the raw value. 
''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not key: raise SaltInvocationError('Required argument "key" is missing.') query_params = {} function = 'kv/{0}'.format(key) if recurse: query_params['recurse'] = 'True' if raw: query_params['raw'] = True ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) if ret['res']: if decode: for item in ret['data']: if item['Value'] is None: item['Value'] = "" else: item['Value'] = base64.b64decode(item['Value']) return ret def put(consul_url=None, token=None, key=None, value=None, **kwargs): ''' Put values into Consul :param consul_url: The Consul server URL. :param key: The key to use as the starting point for the list. :param value: The value to set the key to. :param flags: This can be used to specify an unsigned value between 0 and 2^64-1. Clients can choose to use this however makes sense for their application. :param cas: This flag is used to turn the PUT into a Check-And-Set operation. :param acquire: This flag is used to turn the PUT into a lock acquisition operation. :param release: This flag is used to turn the PUT into a lock release operation. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.put key='web/key1' value="Hello there" salt '*' consul.put key='web/key1' value="Hello there" acquire='d5d371f4-c380-5280-12fd-8810be175592' salt '*' consul.put key='web/key1' value="Hello there" release='d5d371f4-c380-5280-12fd-8810be175592' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if not key: raise SaltInvocationError('Required argument "key" is missing.') # Invalid to specified these together conflicting_args = ['cas', 'release', 'acquire'] for _l1 in conflicting_args: for _l2 in conflicting_args: if _l1 in kwargs and _l2 in kwargs and _l1 != _l2: raise SaltInvocationError('Using arguments `{0}` and `{1}`' ' together is invalid.'.format(_l1, _l2)) query_params = {} available_sessions = session_list(consul_url=consul_url, return_list=True) _current = get(consul_url=consul_url, key=key) if 'flags' in kwargs: if kwargs['flags'] >= 0 and kwargs['flags'] <= 2**64: query_params['flags'] = kwargs['flags'] if 'cas' in kwargs: if _current['res']: if kwargs['cas'] == 0: ret['message'] = ('Key {0} exists, index ' 'must be non-zero.'.format(key)) ret['res'] = False return ret if kwargs['cas'] != _current['data']['ModifyIndex']: ret['message'] = ('Key {0} exists, but indexes ' 'do not match.'.format(key)) ret['res'] = False return ret query_params['cas'] = kwargs['cas'] else: ret['message'] = ('Key {0} does not exists, ' 'CAS argument can not be used.'.format(key)) ret['res'] = False return ret if 'acquire' in kwargs: if kwargs['acquire'] not in available_sessions: ret['message'] = '{0} is not a valid session.'.format(kwargs['acquire']) ret['res'] = False return ret query_params['acquire'] = kwargs['acquire'] if 'release' in kwargs: if _current['res']: if 'Session' in _current['data']: if _current['data']['Session'] == kwargs['release']: query_params['release'] = kwargs['release'] else: ret['message'] = '{0} locked by another session.'.format(key) ret['res'] = False return ret else: ret['message'] = '{0} is not a valid session.'.format(kwargs['acquire']) ret['res'] = False else: log.error('Key {0} does not exist. 
Skipping release.') data = value function = 'kv/{0}'.format(key) method = 'PUT' ret = _query(consul_url=consul_url, token=token, function=function, method=method, data=data, query_params=query_params) if ret['res']: ret['res'] = True ret['data'] = 'Added key {0} with value {1}.'.format(key, value) else: ret['res'] = False ret['data'] = 'Unable to add key {0} with value {1}.'.format(key, value) return ret def delete(consul_url=None, token=None, key=None, **kwargs): ''' Delete values from Consul :param consul_url: The Consul server URL. :param key: The key to use as the starting point for the list. :param recurse: Delete values recursively beginning at the value of key. :param cas: This flag is used to turn the DELETE into a Check-And-Set operation. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.delete key='web' salt '*' consul.delete key='web' recurse='True' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not key: raise SaltInvocationError('Required argument "key" is missing.') query_params = {} if 'recurse' in kwargs: query_params['recurse'] = True if 'cas' in kwargs: if kwargs['cas'] > 0: query_params['cas'] = kwargs['cas'] else: ret['message'] = ('Check and Set Operation ', 'value must be greater than 0.') ret['res'] = False return ret function = 'kv/{0}'.format(key) ret = _query(consul_url=consul_url, token=token, function=function, method='DELETE', query_params=query_params) if ret['res']: ret['res'] = True ret['message'] = 'Deleted key {0}.'.format(key) else: ret['res'] = False ret['message'] = 'Unable to delete key {0}.'.format(key) return ret def agent_checks(consul_url=None, token=None): ''' Returns the checks the local agent is managing :param consul_url: The Consul server URL. :return: Returns the checks the local agent is managing CLI Example: .. 
code-block:: bash salt '*' consul.agent_checks ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'agent/checks' ret = _query(consul_url=consul_url, function=function, token=token, method='GET') return ret def agent_services(consul_url=None, token=None): ''' Returns the services the local agent is managing :param consul_url: The Consul server URL. :return: Returns the services the local agent is managing CLI Example: .. code-block:: bash salt '*' consul.agent_services ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'agent/services' ret = _query(consul_url=consul_url, function=function, token=token, method='GET') return ret def agent_members(consul_url=None, token=None, **kwargs): ''' Returns the members as seen by the local serf agent :param consul_url: The Consul server URL. :return: Returns the members as seen by the local serf agent CLI Example: .. code-block:: bash salt '*' consul.agent_members ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'wan' in kwargs: query_params['wan'] = kwargs['wan'] function = 'agent/members' ret = _query(consul_url=consul_url, function=function, token=token, method='GET', query_params=query_params) return ret def agent_self(consul_url=None, token=None): ''' Returns the local node configuration :param consul_url: The Consul server URL. :return: Returns the local node configuration CLI Example: .. 
code-block:: bash salt '*' consul.agent_self ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'agent/self' ret = _query(consul_url=consul_url, function=function, token=token, method='GET', query_params=query_params) return ret def agent_maintenance(consul_url=None, token=None, **kwargs): ''' Manages node maintenance mode :param consul_url: The Consul server URL. :param enable: The enable flag is required. Acceptable values are either true (to enter maintenance mode) or false (to resume normal operation). :param reason: If provided, its value should be a text string explaining the reason for placing the node into maintenance mode. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_maintenance enable='False' reason='Upgrade in progress' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'enable' in kwargs: query_params['enable'] = kwargs['enable'] else: ret['message'] = 'Required parameter "enable" is missing.' ret['res'] = False return ret if 'reason' in kwargs: query_params['reason'] = kwargs['reason'] function = 'agent/maintenance' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', query_params=query_params) if res['res']: ret['res'] = True ret['message'] = ('Agent maintenance mode ' '{0}ed.'.format(kwargs['enable'])) else: ret['res'] = True ret['message'] = 'Unable to change maintenance mode for agent.' return ret def agent_join(consul_url=None, token=None, address=None, **kwargs): ''' Triggers the local agent to join a node :param consul_url: The Consul server URL. :param address: The address for the agent to connect to. 
:param wan: Causes the agent to attempt to join using the WAN pool. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_join address='192.168.1.1' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not address: raise SaltInvocationError('Required argument "address" is missing.') if 'wan' in kwargs: query_params['wan'] = kwargs['wan'] function = 'agent/join/{0}'.format(address) res = _query(consul_url=consul_url, function=function, token=token, method='GET', query_params=query_params) if res['res']: ret['res'] = True ret['message'] = 'Agent joined the cluster' else: ret['res'] = False ret['message'] = 'Unable to join the cluster.' return ret def agent_leave(consul_url=None, token=None, node=None): ''' Used to instruct the agent to force a node into the left state. :param consul_url: The Consul server URL. :param node: The node the agent will force into left state :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_leave node='web1.example.com' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if not node: raise SaltInvocationError('Required argument "node" is missing.') function = 'agent/force-leave/{0}'.format(node) res = _query(consul_url=consul_url, function=function, token=token, method='GET', query_params=query_params) if res['res']: ret['res'] = True ret['message'] = 'Node {0} put in leave state.'.format(node) else: ret['res'] = False ret['message'] = 'Unable to change state for {0}.'.format(node) return ret def agent_check_register(consul_url=None, token=None, **kwargs): ''' The register endpoint is used to add a new check to the local agent. :param consul_url: The Consul server URL. :param name: The description of what the check is for. :param id: The unique name to use for the check, if not provided 'name' is used. :param notes: Human readable description of the check. :param script: If script is provided, the check type is a script, and Consul will evaluate that script based on the interval parameter. :param http: Check will perform an HTTP GET request against the value of HTTP (expected to be a URL) based on the interval parameter. :param ttl: If a TTL type is used, then the TTL update endpoint must be used periodically to update the state of the check. :param interval: Interval at which the check should run. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_check_register name='Memory Utilization' script='/usr/local/bin/check_mem.py' interval='15s' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'name' in kwargs: data['Name'] = kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') if True not in [True for item in ('script', 'http', 'ttl') if item in kwargs]: ret['message'] = 'Required parameter "script" or "http" is missing.' 
ret['res'] = False return ret if 'id' in kwargs: data['ID'] = kwargs['id'] if 'notes' in kwargs: data['Notes'] = kwargs['notes'] if 'script' in kwargs: if 'interval' not in kwargs: ret['message'] = 'Required parameter "interval" is missing.' ret['res'] = False return ret data['Script'] = kwargs['script'] data['Interval'] = kwargs['interval'] if 'http' in kwargs: if 'interval' not in kwargs: ret['message'] = 'Required parameter "interval" is missing.' ret['res'] = False return ret data['HTTP'] = kwargs['http'] data['Interval'] = kwargs['interval'] if 'ttl' in kwargs: data['TTL'] = kwargs['ttl'] function = 'agent/check/register' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = ('Check {0} added to agent.'.format(kwargs['name'])) else: ret['res'] = False ret['message'] = 'Unable to add check to agent.' return ret def agent_check_deregister(consul_url=None, token=None, checkid=None): ''' The agent will take care of deregistering the check from the Catalog. :param consul_url: The Consul server URL. :param checkid: The ID of the check to deregister from Consul. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_check_deregister checkid='Memory Utilization' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not checkid: raise SaltInvocationError('Required argument "checkid" is missing.') function = 'agent/check/deregister/{0}'.format(checkid) res = _query(consul_url=consul_url, function=function, token=token, method='GET') if res['res']: ret['res'] = True ret['message'] = ('Check {0} removed from agent.'.format(checkid)) else: ret['res'] = False ret['message'] = 'Unable to remove check from agent.' 
return ret def agent_check_pass(consul_url=None, token=None, checkid=None, **kwargs): ''' This endpoint is used with a check that is of the TTL type. When this is called, the status of the check is set to passing and the TTL clock is reset. :param consul_url: The Consul server URL. :param checkid: The ID of the check to mark as passing. :param note: A human-readable message with the status of the check. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_check_pass checkid='redis_check1' note='Forcing check into passing state.' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not checkid: raise SaltInvocationError('Required argument "checkid" is missing.') if 'note' in kwargs: query_params['note'] = kwargs['note'] function = 'agent/check/pass/{0}'.format(checkid) res = _query(consul_url=consul_url, function=function, token=token, query_params=query_params, method='GET') if res['res']: ret['res'] = True ret['message'] = 'Check {0} marked as passing.'.format(checkid) else: ret['res'] = False ret['message'] = 'Unable to update check {0}.'.format(checkid) return ret def agent_check_warn(consul_url=None, token=None, checkid=None, **kwargs): ''' This endpoint is used with a check that is of the TTL type. When this is called, the status of the check is set to warning and the TTL clock is reset. :param consul_url: The Consul server URL. :param checkid: The ID of the check to deregister from Consul. :param note: A human-readable message with the status of the check. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_check_warn checkid='redis_check1' note='Forcing check into warning state.' 
''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not checkid: raise SaltInvocationError('Required argument "checkid" is missing.') if 'note' in kwargs: query_params['note'] = kwargs['note'] function = 'agent/check/warn/{0}'.format(checkid) res = _query(consul_url=consul_url, function=function, token=token, query_params=query_params, method='GET') if res['res']: ret['res'] = True ret['message'] = 'Check {0} marked as warning.'.format(checkid) else: ret['res'] = False ret['message'] = 'Unable to update check {0}.'.format(checkid) return ret def agent_check_fail(consul_url=None, token=None, checkid=None, **kwargs): ''' This endpoint is used with a check that is of the TTL type. When this is called, the status of the check is set to critical and the TTL clock is reset. :param consul_url: The Consul server URL. :param checkid: The ID of the check to deregister from Consul. :param note: A human-readable message with the status of the check. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_check_fail checkid='redis_check1' note='Forcing check into critical state.' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if not checkid: raise SaltInvocationError('Required argument "checkid" is missing.') if 'note' in kwargs: query_params['note'] = kwargs['note'] function = 'agent/check/fail/{0}'.format(checkid) res = _query(consul_url=consul_url, function=function, token=token, query_params=query_params, method='GET') if res['res']: ret['res'] = True ret['message'] = 'Check {0} marked as critical.'.format(checkid) else: ret['res'] = False ret['message'] = 'Unable to update check {0}.'.format(checkid) return ret def agent_service_register(consul_url=None, token=None, **kwargs): ''' The used to add a new service, with an optional health check, to the local agent. :param consul_url: The Consul server URL. :param name: A name describing the service. :param address: The address used by the service, defaults to the address of the agent. :param port: The port used by the service. :param id: Unique ID to identify the service, if not provided the value of the name parameter is used. :param tags: Identifying tags for service, string or list. :param script: If script is provided, the check type is a script, and Consul will evaluate that script based on the interval parameter. :param http: Check will perform an HTTP GET request against the value of HTTP (expected to be a URL) based on the interval parameter. :param check_ttl: If a TTL type is used, then the TTL update endpoint must be used periodically to update the state of the check. :param check_interval: Interval at which the check should run. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_service_register name='redis' tags='["master", "v1"]' address="127.0.0.1" port="8080" check_script="/usr/local/bin/check_redis.py" interval="10s" ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret lc_kwargs = dict() for k, v in six.iteritems(kwargs): lc_kwargs[k.lower()] = v if 'name' in lc_kwargs: data['Name'] = lc_kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') if 'address' in lc_kwargs: data['Address'] = lc_kwargs['address'] if 'port' in lc_kwargs: data['Port'] = lc_kwargs['port'] if 'id' in lc_kwargs: data['ID'] = lc_kwargs['id'] if 'tags' in lc_kwargs: _tags = lc_kwargs['tags'] if not isinstance(_tags, list): _tags = [_tags] data['Tags'] = _tags if 'enabletagoverride' in lc_kwargs: data['EnableTagOverride'] = lc_kwargs['enabletagoverride'] if 'check' in lc_kwargs: dd = dict() for k, v in six.iteritems(lc_kwargs['check']): dd[k.lower()] = v interval_required = False check_dd = dict() if 'script' in dd: interval_required = True check_dd['Script'] = dd['script'] if 'http' in dd: interval_required = True check_dd['HTTP'] = dd['http'] if 'ttl' in dd: check_dd['TTL'] = dd['ttl'] if 'interval' in dd: check_dd['Interval'] = dd['interval'] if interval_required: if 'Interval' not in check_dd: ret['message'] = 'Required parameter "interval" is missing.' ret['res'] = False return ret else: if 'Interval' in check_dd: del check_dd['Interval'] # not required, so ignore it if check_dd > 0: data['Check'] = check_dd # if empty, ignore it function = 'agent/service/register' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = 'Service {0} registered on agent.'.format(kwargs['name']) else: ret['res'] = False ret['message'] = 'Unable to register service {0}.'.format(kwargs['name']) return ret def agent_service_deregister(consul_url=None, token=None, serviceid=None): ''' Used to remove a service. :param consul_url: The Consul server URL. :param serviceid: A serviceid describing the service. :return: Boolean and message indicating success or failure. CLI Example: .. 
code-block:: bash salt '*' consul.agent_service_deregister serviceid='redis' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not serviceid: raise SaltInvocationError('Required argument "serviceid" is missing.') function = 'agent/service/deregister/{0}'.format(serviceid) res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = 'Service {0} removed from agent.'.format(serviceid) else: ret['res'] = False ret['message'] = 'Unable to remove service {0}.'.format(serviceid) return ret def agent_service_maintenance(consul_url=None, token=None, serviceid=None, **kwargs): ''' Used to place a service into maintenance mode. :param consul_url: The Consul server URL. :param serviceid: A name of the service. :param enable: Whether the service should be enabled or disabled. :param reason: A human readable message of why the service was enabled or disabled. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_service_deregister serviceid='redis' enable='True' reason='Down for upgrade' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not serviceid: raise SaltInvocationError('Required argument "serviceid" is missing.') if 'enable' in kwargs: query_params['enable'] = kwargs['enable'] else: ret['message'] = 'Required parameter "enable" is missing.' 
ret['res'] = False return ret if 'reason' in kwargs: query_params['reason'] = kwargs['reason'] function = 'agent/service/maintenance/{0}'.format(serviceid) res = _query(consul_url=consul_url, token=token, function=function, query_params=query_params) if res['res']: ret['res'] = True ret['message'] = ('Service {0} set in ' 'maintenance mode.'.format(serviceid)) else: ret['res'] = False ret['message'] = ('Unable to set service ' '{0} to maintenance mode.'.format(serviceid)) return ret def session_create(consul_url=None, token=None, **kwargs): ''' Used to create a session. :param consul_url: The Consul server URL. :param lockdelay: Duration string using a "s" suffix for seconds. The default is 15s. :param node: Must refer to a node that is already registered, if specified. By default, the agent's own node name is used. :param name: A human-readable name for the session :param checks: A list of associated health checks. It is highly recommended that, if you override this list, you include the default "serfHealth". :param behavior: Can be set to either release or delete. This controls the behavior when a session is invalidated. By default, this is release, causing any locks that are held to be released. Changing this to delete causes any locks that are held to be deleted. delete is useful for creating ephemeral key/value entries. :param ttl: Session is invalidated if it is not renewed before the TTL expires :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.session_create node='node1' name='my-session' behavior='delete' ttl='3600s' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret data = {} if 'lockdelay' in kwargs: data['LockDelay'] = kwargs['lockdelay'] if 'node' in kwargs: data['Node'] = kwargs['node'] if 'name' in kwargs: data['Name'] = kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') if 'checks' in kwargs: data['Touch'] = kwargs['touch'] if 'behavior' in kwargs: if not kwargs['behavior'] in ('delete', 'release'): ret['message'] = ('Behavior must be ', 'either delete or release.') ret['res'] = False return ret data['Behavior'] = kwargs['behavior'] if 'ttl' in kwargs: _ttl = kwargs['ttl'] if six.text_type(_ttl).endswith('s'): _ttl = _ttl[:-1] if int(_ttl) < 0 or int(_ttl) > 3600: ret['message'] = ('TTL must be ', 'between 0 and 3600.') ret['res'] = False return ret data['TTL'] = '{0}s'.format(_ttl) function = 'session/create' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = 'Created session {0}.'.format(kwargs['name']) else: ret['res'] = False ret['message'] = 'Unable to create session {0}.'.format(kwargs['name']) return ret def session_list(consul_url=None, token=None, return_list=False, **kwargs): ''' Used to list sessions. :param consul_url: The Consul server URL. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param return_list: By default, all information about the sessions is returned, using the return_list parameter will return a list of session IDs. :return: A list of all available sessions. CLI Example: .. code-block:: bash salt '*' consul.session_list ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret query_params = {} if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'session/list' ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) if return_list: _list = [] for item in ret['data']: _list.append(item['ID']) return _list return ret def session_destroy(consul_url=None, token=None, session=None, **kwargs): ''' Destroy session :param consul_url: The Consul server URL. :param session: The ID of the session to destroy. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.session_destroy session='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not session: raise SaltInvocationError('Required argument "session" is missing.') query_params = {} if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'session/destroy/{0}'.format(session) res = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) if res['res']: ret['res'] = True ret['message'] = 'Created Service {0}.'.format(kwargs['name']) else: ret['res'] = False ret['message'] = 'Unable to create service {0}.'.format(kwargs['name']) return ret def session_info(consul_url=None, token=None, session=None, **kwargs): ''' Information about a session :param consul_url: The Consul server URL. :param session: The ID of the session to return information about. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: Boolean & message of success or failure. CLI Example: .. 
code-block:: bash salt '*' consul.session_info session='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not session: raise SaltInvocationError('Required argument "session" is missing.') query_params = {} if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'session/info/{0}'.format(session) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def catalog_register(consul_url=None, token=None, **kwargs): ''' Registers a new node, service, or check :param consul_url: The Consul server URL. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param node: The node to register. :param address: The address of the node. :param service: The service that will be registered. :param service_address: The address that the service listens on. :param service_port: The port for the service. :param service_id: A unique identifier for the service, if this is not provided "name" will be used. :param service_tags: Any tags associated with the service. :param check: The name of the health check to register :param check_status: The initial status of the check, must be one of unknown, passing, warning, or critical. :param check_service: The service that the check is performed against. :param check_id: Unique identifier for the service. :param check_notes: An opaque field that is meant to hold human-readable text. :return: Boolean & message of success or failure. CLI Example: .. 
code-block:: bash salt '*' consul.catalog_register node='node1' address='192.168.1.1' service='redis' service_address='127.0.0.1' service_port='8080' service_id='redis_server1' ''' ret = {} data = {} data['NodeMeta'] = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'datacenter' in kwargs: data['Datacenter'] = kwargs['datacenter'] if 'node' in kwargs: data['Node'] = kwargs['node'] else: ret['message'] = 'Required argument node argument is missing.' ret['res'] = False return ret if 'address' in kwargs: if isinstance(kwargs['address'], list): _address = kwargs['address'][0] else: _address = kwargs['address'] data['Address'] = _address else: ret['message'] = 'Required argument address argument is missing.' ret['res'] = False return ret if 'ip_interfaces' in kwargs: data['TaggedAddresses'] = {} for k in kwargs['ip_interfaces']: if kwargs['ip_interfaces'].get(k): data['TaggedAddresses'][k] = kwargs['ip_interfaces'][k][0] if 'service' in kwargs: data['Service'] = {} data['Service']['Service'] = kwargs['service'] if 'service_address' in kwargs: data['Service']['Address'] = kwargs['service_address'] if 'service_port' in kwargs: data['Service']['Port'] = kwargs['service_port'] if 'service_id' in kwargs: data['Service']['ID'] = kwargs['service_id'] if 'service_tags' in kwargs: _tags = kwargs['service_tags'] if not isinstance(_tags, list): _tags = [_tags] data['Service']['Tags'] = _tags if 'cpu' in kwargs: data['NodeMeta']['Cpu'] = kwargs['cpu'] if 'num_cpus' in kwargs: data['NodeMeta']['Cpu_num'] = kwargs['num_cpus'] if 'mem' in kwargs: data['NodeMeta']['Memory'] = kwargs['mem'] if 'oscode' in kwargs: data['NodeMeta']['Os'] = kwargs['oscode'] if 'osarch' in kwargs: data['NodeMeta']['Osarch'] = kwargs['osarch'] if 'kernel' in kwargs: data['NodeMeta']['Kernel'] = kwargs['kernel'] if 'kernelrelease' in kwargs: data['NodeMeta']['Kernelrelease'] = 
kwargs['kernelrelease'] if 'localhost' in kwargs: data['NodeMeta']['localhost'] = kwargs['localhost'] if 'nodename' in kwargs: data['NodeMeta']['nodename'] = kwargs['nodename'] if 'os_family' in kwargs: data['NodeMeta']['os_family'] = kwargs['os_family'] if 'lsb_distrib_description' in kwargs: data['NodeMeta']['lsb_distrib_description'] = kwargs['lsb_distrib_description'] if 'master' in kwargs: data['NodeMeta']['master'] = kwargs['master'] if 'check' in kwargs: data['Check'] = {} data['Check']['Name'] = kwargs['check'] if 'check_status' in kwargs: if kwargs['check_status'] not in ('unknown', 'passing', 'warning', 'critical'): ret['message'] = 'Check status must be unknown, passing, warning, or critical.' ret['res'] = False return ret data['Check']['Status'] = kwargs['check_status'] if 'check_service' in kwargs: data['Check']['ServiceID'] = kwargs['check_service'] if 'check_id' in kwargs: data['Check']['CheckID'] = kwargs['check_id'] if 'check_notes' in kwargs: data['Check']['Notes'] = kwargs['check_notes'] function = 'catalog/register' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = ('Catalog registration ' 'for {0} successful.'.format(kwargs['node'])) else: ret['res'] = False ret['message'] = ('Catalog registration ' 'for {0} failed.'.format(kwargs['node'])) ret['data'] = data return ret def catalog_deregister(consul_url=None, token=None, **kwargs): ''' Deregisters a node, service, or check :param consul_url: The Consul server URL. :param node: The node to deregister. :param datacenter: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param checkid: The ID of the health check to deregister. :param serviceid: The ID of the service to deregister. :return: Boolean & message of success or failure. CLI Example: .. 
code-block:: bash salt '*' consul.catalog_register node='node1' serviceid='redis_server1' checkid='redis_check1' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'datacenter' in kwargs: data['Datacenter'] = kwargs['datacenter'] if 'node' in kwargs: data['Node'] = kwargs['node'] else: ret['message'] = 'Node argument required.' ret['res'] = False return ret if 'checkid' in kwargs: data['CheckID'] = kwargs['checkid'] if 'serviceid' in kwargs: data['ServiceID'] = kwargs['serviceid'] function = 'catalog/deregister' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = 'Catalog item {0} removed.'.format(kwargs['node']) else: ret['res'] = False ret['message'] = ('Removing Catalog ' 'item {0} failed.'.format(kwargs['node'])) return ret def catalog_datacenters(consul_url=None, token=None): ''' Return list of available datacenters from catalog. :param consul_url: The Consul server URL. :return: The list of available datacenters. CLI Example: .. code-block:: bash salt '*' consul.catalog_datacenters ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'catalog/datacenters' ret = _query(consul_url=consul_url, function=function, token=token) return ret def catalog_nodes(consul_url=None, token=None, **kwargs): ''' Return list of available nodes from catalog. :param consul_url: The Consul server URL. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: The list of available nodes. CLI Example: .. 
code-block:: bash salt '*' consul.catalog_nodes ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'catalog/nodes' ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def catalog_services(consul_url=None, token=None, **kwargs): ''' Return list of available services rom catalog. :param consul_url: The Consul server URL. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: The list of available services. CLI Example: .. code-block:: bash salt '*' consul.catalog_services ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'catalog/services' ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def catalog_service(consul_url=None, token=None, service=None, **kwargs): ''' Information about the registered service. :param consul_url: The Consul server URL. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param tag: Filter returned services with tag parameter. :return: Information about the requested service. CLI Example: .. code-block:: bash salt '*' consul.catalog_service service='redis' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if not service: raise SaltInvocationError('Required argument "service" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] if 'tag' in kwargs: query_params['tag'] = kwargs['tag'] function = 'catalog/service/{0}'.format(service) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def catalog_node(consul_url=None, token=None, node=None, **kwargs): ''' Information about the registered node. :param consul_url: The Consul server URL. :param node: The node to request information about. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: Information about the requested node. CLI Example: .. code-block:: bash salt '*' consul.catalog_service service='redis' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not node: raise SaltInvocationError('Required argument "node" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'catalog/node/{0}'.format(node) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def health_node(consul_url=None, token=None, node=None, **kwargs): ''' Health information about the registered node. :param consul_url: The Consul server URL. :param node: The node to request health information about. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: Health information about the requested node. CLI Example: .. code-block:: bash salt '*' consul.health_node node='node1' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if not node: raise SaltInvocationError('Required argument "node" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'health/node/{0}'.format(node) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def health_checks(consul_url=None, token=None, service=None, **kwargs): ''' Health information about the registered service. :param consul_url: The Consul server URL. :param service: The service to request health information about. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: Health information about the requested node. CLI Example: .. code-block:: bash salt '*' consul.health_checks service='redis1' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not service: raise SaltInvocationError('Required argument "service" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'health/checks/{0}'.format(service) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def health_service(consul_url=None, token=None, service=None, **kwargs): ''' Health information about the registered service. :param consul_url: The Consul server URL. :param service: The service to request health information about. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param tag: Filter returned services with tag parameter. :param passing: Filter results to only nodes with all checks in the passing state. :return: Health information about the requested node. CLI Example: .. 
code-block:: bash salt '*' consul.health_service service='redis1' salt '*' consul.health_service service='redis1' passing='True' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not service: raise SaltInvocationError('Required argument "service" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] if 'tag' in kwargs: query_params['tag'] = kwargs['tag'] if 'passing' in kwargs: query_params['passing'] = kwargs['passing'] function = 'health/service/{0}'.format(service) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def health_state(consul_url=None, token=None, state=None, **kwargs): ''' Returns the checks in the state provided on the path. :param consul_url: The Consul server URL. :param state: The state to show checks for. The supported states are any, unknown, passing, warning, or critical. The any state is a wildcard that can be used to return all checks. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: The checks in the provided state. CLI Example: .. code-block:: bash salt '*' consul.health_state state='redis1' salt '*' consul.health_state service='redis1' passing='True' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not state: raise SaltInvocationError('Required argument "state" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] if state not in ('any', 'unknown', 'passing', 'warning', 'critical'): ret['message'] = 'State must be any, unknown, passing, warning, or critical.' 
ret['res'] = False return ret function = 'health/state/{0}'.format(state) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def status_leader(consul_url=None, token=None): ''' Returns the current Raft leader :param consul_url: The Consul server URL. :return: The address of the Raft leader. CLI Example: .. code-block:: bash salt '*' consul.status_leader ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'status/leader' ret = _query(consul_url=consul_url, function=function, token=token) return ret def status_peers(consul_url, token=None): ''' Returns the current Raft peer set :param consul_url: The Consul server URL. :return: Retrieves the Raft peers for the datacenter in which the agent is running. CLI Example: .. code-block:: bash salt '*' consul.status_peers ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'status/peers' ret = _query(consul_url=consul_url, function=function, token=token) return ret def acl_create(consul_url=None, token=None, **kwargs): ''' Create a new ACL token. :param consul_url: The Consul server URL. :param name: Meaningful indicator of the ACL's purpose. :param type: Type is either client or management. A management token is comparable to a root user and has the ability to perform any action including creating, modifying, and deleting ACLs. :param rules: The Consul server URL. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.acl_create ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if 'name' in kwargs: data['Name'] = kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') if 'type' in kwargs: data['Type'] = kwargs['type'] if 'rules' in kwargs: data['Rules'] = kwargs['rules'] function = 'acl/create' res = _query(consul_url=consul_url, token=token, data=data, method='PUT', function=function) if res['res']: ret['res'] = True ret['message'] = 'ACL {0} created.'.format(kwargs['name']) else: ret['res'] = False ret['message'] = ('Removing Catalog ' 'item {0} failed.'.format(kwargs['name'])) return ret def acl_update(consul_url=None, token=None, **kwargs): ''' Update an ACL token. :param consul_url: The Consul server URL. :param name: Meaningful indicator of the ACL's purpose. :param id: Unique identifier for the ACL to update. :param type: Type is either client or management. A management token is comparable to a root user and has the ability to perform any action including creating, modifying, and deleting ACLs. :param rules: The Consul server URL. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.acl_update ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'id' in kwargs: data['ID'] = kwargs['id'] else: ret['message'] = 'Required parameter "id" is missing.' 
ret['res'] = False return ret if 'name' in kwargs: data['Name'] = kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') if 'type' in kwargs: data['Type'] = kwargs['type'] if 'rules' in kwargs: data['Rules'] = kwargs['rules'] function = 'acl/update' res = _query(consul_url=consul_url, token=token, data=data, method='PUT', function=function) if res['res']: ret['res'] = True ret['message'] = 'ACL {0} created.'.format(kwargs['name']) else: ret['res'] = False ret['message'] = ('Adding ACL ' '{0} failed.'.format(kwargs['name'])) return ret def acl_delete(consul_url=None, token=None, **kwargs): ''' Delete an ACL token. :param consul_url: The Consul server URL. :param id: Unique identifier for the ACL to update. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.acl_delete id='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'id' not in kwargs: ret['message'] = 'Required parameter "id" is missing.' ret['res'] = False return ret function = 'acl/delete/{0}'.format(kwargs['id']) res = _query(consul_url=consul_url, token=token, data=data, method='PUT', function=function) if res['res']: ret['res'] = True ret['message'] = 'ACL {0} deleted.'.format(kwargs['id']) else: ret['res'] = False ret['message'] = ('Removing ACL ' '{0} failed.'.format(kwargs['id'])) return ret def acl_info(consul_url=None, **kwargs): ''' Information about an ACL token. :param consul_url: The Consul server URL. :param id: Unique identifier for the ACL to update. :return: Information about the ACL requested. CLI Example: .. 
code-block:: bash salt '*' consul.acl_info id='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'id' not in kwargs: ret['message'] = 'Required parameter "id" is missing.' ret['res'] = False return ret function = 'acl/info/{0}'.format(kwargs['id']) ret = _query(consul_url=consul_url, data=data, method='GET', function=function) return ret def acl_clone(consul_url=None, token=None, **kwargs): ''' Information about an ACL token. :param consul_url: The Consul server URL. :param id: Unique identifier for the ACL to update. :return: Boolean, message of success or failure, and new ID of cloned ACL. CLI Example: .. code-block:: bash salt '*' consul.acl_info id='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'id' not in kwargs: ret['message'] = 'Required parameter "id" is missing.' ret['res'] = False return ret function = 'acl/clone/{0}'.format(kwargs['id']) res = _query(consul_url=consul_url, token=token, data=data, method='PUT', function=function) if res['res']: ret['res'] = True ret['message'] = 'ACL {0} cloned.'.format(kwargs['name']) ret['ID'] = ret['data'] else: ret['res'] = False ret['message'] = ('Cloning ACL' 'item {0} failed.'.format(kwargs['name'])) return ret def acl_list(consul_url=None, token=None, **kwargs): ''' List the ACL tokens. :param consul_url: The Consul server URL. :return: List of ACLs CLI Example: .. code-block:: bash salt '*' consul.acl_list ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if 'id' not in kwargs: ret['message'] = 'Required parameter "id" is missing.' ret['res'] = False return ret function = 'acl/list' ret = _query(consul_url=consul_url, token=token, data=data, method='PUT', function=function) return ret def event_fire(consul_url=None, token=None, name=None, **kwargs): ''' List the ACL tokens. :param consul_url: The Consul server URL. :param name: The name of the event to fire. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param node: Filter by node name. :param service: Filter by service name. :param tag: Filter by tag name. :return: List of ACLs CLI Example: .. code-block:: bash salt '*' consul.event_fire name='deploy' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not name: raise SaltInvocationError('Required argument "name" is missing.') if 'dc' in kwargs: query_params = kwargs['dc'] if 'node' in kwargs: query_params = kwargs['node'] if 'service' in kwargs: query_params = kwargs['service'] if 'tag' in kwargs: query_params = kwargs['tag'] function = 'event/fire/{0}'.format(name) res = _query(consul_url=consul_url, token=token, query_params=query_params, method='PUT', function=function) if res['res']: ret['res'] = True ret['message'] = 'Event {0} fired.'.format(name) ret['data'] = ret['data'] else: ret['res'] = False ret['message'] = ('Cloning ACL' 'item {0} failed.'.format(kwargs['name'])) return ret def event_list(consul_url=None, token=None, **kwargs): ''' List the recent events. :param consul_url: The Consul server URL. :param name: The name of the event to fire. :return: List of ACLs CLI Example: .. 
code-block:: bash salt '*' consul.event_list ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'name' in kwargs: query_params = kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') function = 'event/list/' ret = _query(consul_url=consul_url, token=token, query_params=query_params, function=function) return ret
saltstack/salt
salt/modules/consul.py
get
python
def get(consul_url=None, key=None, token=None, recurse=False, decode=False, raw=False):
    '''
    Get key from Consul

    :param consul_url: The Consul server URL.
    :param key: The key to use as the starting point for the list.
    :param recurse: Return values recursively beginning at the value of key.
    :param decode: By default values are stored as Base64 encoded values,
                   decode will return the whole key with the value decoded.
    :param raw: Simply return the decoded value of the key.
    :return: The keys in Consul.

    CLI Example:

    .. code-block:: bash

        salt '*' consul.get key='web/key1'
        salt '*' consul.get key='web' recurse=True
        salt '*' consul.get key='web' recurse=True decode=True

    By default values stored in Consul are base64 encoded, passing the
    decode option will show them as the decoded values.

    .. code-block:: bash

        salt '*' consul.get key='web' recurse=True decode=True raw=True

    By default Consul will return other information about the key, the raw
    option will return only the raw value.
    '''
    ret = {}
    if not consul_url:
        consul_url = _get_config()
        if not consul_url:
            log.error('No Consul URL found.')
            ret['message'] = 'No Consul URL found.'
            ret['res'] = False
            return ret

    if not key:
        raise SaltInvocationError('Required argument "key" is missing.')

    query_params = {}
    function = 'kv/{0}'.format(key)
    if recurse:
        query_params['recurse'] = 'True'
    if raw:
        query_params['raw'] = True
    ret = _query(consul_url=consul_url,
                 function=function,
                 token=token,
                 query_params=query_params)

    # With ``raw`` the API already returns the bare (decoded) value rather
    # than a JSON list of ``{Key, Value, ...}`` dicts, so the Base64 decode
    # pass below must only run for the regular response shape.  Previously
    # ``decode=True raw=True`` raised when iterating the raw payload.
    if ret['res'] and decode and not raw:
        for item in ret['data']:
            if item['Value'] is None:
                item['Value'] = ""
            else:
                item['Value'] = base64.b64decode(item['Value'])
    return ret
Get key from Consul :param consul_url: The Consul server URL. :param key: The key to use as the starting point for the list. :param recurse: Return values recursively beginning at the value of key. :param decode: By default values are stored as Base64 encoded values, decode will return the whole key with the value decoded. :param raw: Simply return the decoded value of the key. :return: The keys in Consul. CLI Example: .. code-block:: bash salt '*' consul.get key='web/key1' salt '*' consul.get key='web' recurse=True salt '*' consul.get key='web' recurse=True decode=True By default values stored in Consul are base64 encoded, passing the decode option will show them as the decoded values. .. code-block:: bash salt '*' consul.get key='web' recurse=True decode=True raw=True By default Consul will return other information about the key, the raw option will return only the raw value.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/consul.py#L163-L224
[ "def _query(function,\n consul_url,\n token=None,\n method='GET',\n api_version='v1',\n data=None,\n query_params=None):\n '''\n Consul object method function to construct and execute on the API URL.\n\n :param api_url: The Consul api url.\n :param api_version The Consul api version\n :param function: The Consul api function to perform.\n :param method: The HTTP method, e.g. GET or POST.\n :param data: The data to be sent for POST method. This param is ignored for GET requests.\n :return: The json response from the API call or False.\n '''\n\n if not query_params:\n query_params = {}\n\n ret = {'data': '',\n 'res': True}\n\n if not token:\n token = _get_token()\n\n headers = {\"X-Consul-Token\": token, \"Content-Type\": \"application/json\"}\n base_url = urllib.parse.urljoin(consul_url, '{0}/'.format(api_version))\n url = urllib.parse.urljoin(base_url, function, False)\n\n if method == 'GET':\n data = None\n else:\n if data is None:\n data = {}\n data = salt.utils.json.dumps(data)\n\n result = salt.utils.http.query(\n url,\n method=method,\n params=query_params,\n data=data,\n decode=True,\n status=True,\n header_dict=headers,\n opts=__opts__,\n )\n\n if result.get('status', None) == http_client.OK:\n ret['data'] = result.get('dict', result)\n ret['res'] = True\n elif result.get('status', None) == http_client.NO_CONTENT:\n ret['res'] = False\n elif result.get('status', None) == http_client.NOT_FOUND:\n ret['data'] = 'Key not found.'\n ret['res'] = False\n else:\n if result:\n ret['data'] = result\n ret['res'] = True\n else:\n ret['res'] = False\n return ret\n", "def _get_config():\n '''\n Retrieve Consul configuration\n '''\n return __salt__['config.get']('consul.url') or \\\n __salt__['config.get']('consul:url')\n" ]
# -*- coding: utf-8 -*- ''' Interact with Consul https://www.consul.io ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import base64 import logging # Import salt libs import salt.utils.http import salt.utils.json # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import http_client, urllib log = logging.getLogger(__name__) from salt.exceptions import SaltInvocationError # Don't shadow built-ins. __func_alias__ = { 'list_': 'list' } __virtualname__ = 'consul' def _get_config(): ''' Retrieve Consul configuration ''' return __salt__['config.get']('consul.url') or \ __salt__['config.get']('consul:url') def _get_token(): ''' Retrieve Consul configuration ''' return __salt__['config.get']('consul.token') or \ __salt__['config.get']('consul:token') def _query(function, consul_url, token=None, method='GET', api_version='v1', data=None, query_params=None): ''' Consul object method function to construct and execute on the API URL. :param api_url: The Consul api url. :param api_version The Consul api version :param function: The Consul api function to perform. :param method: The HTTP method, e.g. GET or POST. :param data: The data to be sent for POST method. This param is ignored for GET requests. :return: The json response from the API call or False. 
''' if not query_params: query_params = {} ret = {'data': '', 'res': True} if not token: token = _get_token() headers = {"X-Consul-Token": token, "Content-Type": "application/json"} base_url = urllib.parse.urljoin(consul_url, '{0}/'.format(api_version)) url = urllib.parse.urljoin(base_url, function, False) if method == 'GET': data = None else: if data is None: data = {} data = salt.utils.json.dumps(data) result = salt.utils.http.query( url, method=method, params=query_params, data=data, decode=True, status=True, header_dict=headers, opts=__opts__, ) if result.get('status', None) == http_client.OK: ret['data'] = result.get('dict', result) ret['res'] = True elif result.get('status', None) == http_client.NO_CONTENT: ret['res'] = False elif result.get('status', None) == http_client.NOT_FOUND: ret['data'] = 'Key not found.' ret['res'] = False else: if result: ret['data'] = result ret['res'] = True else: ret['res'] = False return ret def list_(consul_url=None, token=None, key=None, **kwargs): ''' List keys in Consul :param consul_url: The Consul server URL. :param key: The key to use as the starting point for the list. :return: The list of keys. CLI Example: .. code-block:: bash salt '*' consul.list salt '*' consul.list key='web' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret query_params = {} if 'recurse' in kwargs: query_params['recurse'] = 'True' # No key so recurse and show all values if not key: query_params['recurse'] = 'True' function = 'kv/' else: function = 'kv/{0}'.format(key) query_params['keys'] = 'True' query_params['separator'] = '/' ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def put(consul_url=None, token=None, key=None, value=None, **kwargs): ''' Put values into Consul :param consul_url: The Consul server URL. 
:param key: The key to use as the starting point for the list. :param value: The value to set the key to. :param flags: This can be used to specify an unsigned value between 0 and 2^64-1. Clients can choose to use this however makes sense for their application. :param cas: This flag is used to turn the PUT into a Check-And-Set operation. :param acquire: This flag is used to turn the PUT into a lock acquisition operation. :param release: This flag is used to turn the PUT into a lock release operation. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.put key='web/key1' value="Hello there" salt '*' consul.put key='web/key1' value="Hello there" acquire='d5d371f4-c380-5280-12fd-8810be175592' salt '*' consul.put key='web/key1' value="Hello there" release='d5d371f4-c380-5280-12fd-8810be175592' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not key: raise SaltInvocationError('Required argument "key" is missing.') # Invalid to specified these together conflicting_args = ['cas', 'release', 'acquire'] for _l1 in conflicting_args: for _l2 in conflicting_args: if _l1 in kwargs and _l2 in kwargs and _l1 != _l2: raise SaltInvocationError('Using arguments `{0}` and `{1}`' ' together is invalid.'.format(_l1, _l2)) query_params = {} available_sessions = session_list(consul_url=consul_url, return_list=True) _current = get(consul_url=consul_url, key=key) if 'flags' in kwargs: if kwargs['flags'] >= 0 and kwargs['flags'] <= 2**64: query_params['flags'] = kwargs['flags'] if 'cas' in kwargs: if _current['res']: if kwargs['cas'] == 0: ret['message'] = ('Key {0} exists, index ' 'must be non-zero.'.format(key)) ret['res'] = False return ret if kwargs['cas'] != _current['data']['ModifyIndex']: ret['message'] = ('Key {0} exists, but indexes ' 'do not match.'.format(key)) ret['res'] = False return ret 
query_params['cas'] = kwargs['cas'] else: ret['message'] = ('Key {0} does not exists, ' 'CAS argument can not be used.'.format(key)) ret['res'] = False return ret if 'acquire' in kwargs: if kwargs['acquire'] not in available_sessions: ret['message'] = '{0} is not a valid session.'.format(kwargs['acquire']) ret['res'] = False return ret query_params['acquire'] = kwargs['acquire'] if 'release' in kwargs: if _current['res']: if 'Session' in _current['data']: if _current['data']['Session'] == kwargs['release']: query_params['release'] = kwargs['release'] else: ret['message'] = '{0} locked by another session.'.format(key) ret['res'] = False return ret else: ret['message'] = '{0} is not a valid session.'.format(kwargs['acquire']) ret['res'] = False else: log.error('Key {0} does not exist. Skipping release.') data = value function = 'kv/{0}'.format(key) method = 'PUT' ret = _query(consul_url=consul_url, token=token, function=function, method=method, data=data, query_params=query_params) if ret['res']: ret['res'] = True ret['data'] = 'Added key {0} with value {1}.'.format(key, value) else: ret['res'] = False ret['data'] = 'Unable to add key {0} with value {1}.'.format(key, value) return ret def delete(consul_url=None, token=None, key=None, **kwargs): ''' Delete values from Consul :param consul_url: The Consul server URL. :param key: The key to use as the starting point for the list. :param recurse: Delete values recursively beginning at the value of key. :param cas: This flag is used to turn the DELETE into a Check-And-Set operation. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.delete key='web' salt '*' consul.delete key='web' recurse='True' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if not key: raise SaltInvocationError('Required argument "key" is missing.') query_params = {} if 'recurse' in kwargs: query_params['recurse'] = True if 'cas' in kwargs: if kwargs['cas'] > 0: query_params['cas'] = kwargs['cas'] else: ret['message'] = ('Check and Set Operation ', 'value must be greater than 0.') ret['res'] = False return ret function = 'kv/{0}'.format(key) ret = _query(consul_url=consul_url, token=token, function=function, method='DELETE', query_params=query_params) if ret['res']: ret['res'] = True ret['message'] = 'Deleted key {0}.'.format(key) else: ret['res'] = False ret['message'] = 'Unable to delete key {0}.'.format(key) return ret def agent_checks(consul_url=None, token=None): ''' Returns the checks the local agent is managing :param consul_url: The Consul server URL. :return: Returns the checks the local agent is managing CLI Example: .. code-block:: bash salt '*' consul.agent_checks ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'agent/checks' ret = _query(consul_url=consul_url, function=function, token=token, method='GET') return ret def agent_services(consul_url=None, token=None): ''' Returns the services the local agent is managing :param consul_url: The Consul server URL. :return: Returns the services the local agent is managing CLI Example: .. code-block:: bash salt '*' consul.agent_services ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'agent/services' ret = _query(consul_url=consul_url, function=function, token=token, method='GET') return ret def agent_members(consul_url=None, token=None, **kwargs): ''' Returns the members as seen by the local serf agent :param consul_url: The Consul server URL. 
:return: Returns the members as seen by the local serf agent CLI Example: .. code-block:: bash salt '*' consul.agent_members ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'wan' in kwargs: query_params['wan'] = kwargs['wan'] function = 'agent/members' ret = _query(consul_url=consul_url, function=function, token=token, method='GET', query_params=query_params) return ret def agent_self(consul_url=None, token=None): ''' Returns the local node configuration :param consul_url: The Consul server URL. :return: Returns the local node configuration CLI Example: .. code-block:: bash salt '*' consul.agent_self ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'agent/self' ret = _query(consul_url=consul_url, function=function, token=token, method='GET', query_params=query_params) return ret def agent_maintenance(consul_url=None, token=None, **kwargs): ''' Manages node maintenance mode :param consul_url: The Consul server URL. :param enable: The enable flag is required. Acceptable values are either true (to enter maintenance mode) or false (to resume normal operation). :param reason: If provided, its value should be a text string explaining the reason for placing the node into maintenance mode. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_maintenance enable='False' reason='Upgrade in progress' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if 'enable' in kwargs: query_params['enable'] = kwargs['enable'] else: ret['message'] = 'Required parameter "enable" is missing.' ret['res'] = False return ret if 'reason' in kwargs: query_params['reason'] = kwargs['reason'] function = 'agent/maintenance' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', query_params=query_params) if res['res']: ret['res'] = True ret['message'] = ('Agent maintenance mode ' '{0}ed.'.format(kwargs['enable'])) else: ret['res'] = True ret['message'] = 'Unable to change maintenance mode for agent.' return ret def agent_join(consul_url=None, token=None, address=None, **kwargs): ''' Triggers the local agent to join a node :param consul_url: The Consul server URL. :param address: The address for the agent to connect to. :param wan: Causes the agent to attempt to join using the WAN pool. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_join address='192.168.1.1' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not address: raise SaltInvocationError('Required argument "address" is missing.') if 'wan' in kwargs: query_params['wan'] = kwargs['wan'] function = 'agent/join/{0}'.format(address) res = _query(consul_url=consul_url, function=function, token=token, method='GET', query_params=query_params) if res['res']: ret['res'] = True ret['message'] = 'Agent joined the cluster' else: ret['res'] = False ret['message'] = 'Unable to join the cluster.' return ret def agent_leave(consul_url=None, token=None, node=None): ''' Used to instruct the agent to force a node into the left state. :param consul_url: The Consul server URL. :param node: The node the agent will force into left state :return: Boolean and message indicating success or failure. CLI Example: .. 
code-block:: bash salt '*' consul.agent_leave node='web1.example.com' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not node: raise SaltInvocationError('Required argument "node" is missing.') function = 'agent/force-leave/{0}'.format(node) res = _query(consul_url=consul_url, function=function, token=token, method='GET', query_params=query_params) if res['res']: ret['res'] = True ret['message'] = 'Node {0} put in leave state.'.format(node) else: ret['res'] = False ret['message'] = 'Unable to change state for {0}.'.format(node) return ret def agent_check_register(consul_url=None, token=None, **kwargs): ''' The register endpoint is used to add a new check to the local agent. :param consul_url: The Consul server URL. :param name: The description of what the check is for. :param id: The unique name to use for the check, if not provided 'name' is used. :param notes: Human readable description of the check. :param script: If script is provided, the check type is a script, and Consul will evaluate that script based on the interval parameter. :param http: Check will perform an HTTP GET request against the value of HTTP (expected to be a URL) based on the interval parameter. :param ttl: If a TTL type is used, then the TTL update endpoint must be used periodically to update the state of the check. :param interval: Interval at which the check should run. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_check_register name='Memory Utilization' script='/usr/local/bin/check_mem.py' interval='15s' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if 'name' in kwargs: data['Name'] = kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') if True not in [True for item in ('script', 'http', 'ttl') if item in kwargs]: ret['message'] = 'Required parameter "script" or "http" is missing.' ret['res'] = False return ret if 'id' in kwargs: data['ID'] = kwargs['id'] if 'notes' in kwargs: data['Notes'] = kwargs['notes'] if 'script' in kwargs: if 'interval' not in kwargs: ret['message'] = 'Required parameter "interval" is missing.' ret['res'] = False return ret data['Script'] = kwargs['script'] data['Interval'] = kwargs['interval'] if 'http' in kwargs: if 'interval' not in kwargs: ret['message'] = 'Required parameter "interval" is missing.' ret['res'] = False return ret data['HTTP'] = kwargs['http'] data['Interval'] = kwargs['interval'] if 'ttl' in kwargs: data['TTL'] = kwargs['ttl'] function = 'agent/check/register' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = ('Check {0} added to agent.'.format(kwargs['name'])) else: ret['res'] = False ret['message'] = 'Unable to add check to agent.' return ret def agent_check_deregister(consul_url=None, token=None, checkid=None): ''' The agent will take care of deregistering the check from the Catalog. :param consul_url: The Consul server URL. :param checkid: The ID of the check to deregister from Consul. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_check_deregister checkid='Memory Utilization' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if not checkid: raise SaltInvocationError('Required argument "checkid" is missing.') function = 'agent/check/deregister/{0}'.format(checkid) res = _query(consul_url=consul_url, function=function, token=token, method='GET') if res['res']: ret['res'] = True ret['message'] = ('Check {0} removed from agent.'.format(checkid)) else: ret['res'] = False ret['message'] = 'Unable to remove check from agent.' return ret def agent_check_pass(consul_url=None, token=None, checkid=None, **kwargs): ''' This endpoint is used with a check that is of the TTL type. When this is called, the status of the check is set to passing and the TTL clock is reset. :param consul_url: The Consul server URL. :param checkid: The ID of the check to mark as passing. :param note: A human-readable message with the status of the check. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_check_pass checkid='redis_check1' note='Forcing check into passing state.' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not checkid: raise SaltInvocationError('Required argument "checkid" is missing.') if 'note' in kwargs: query_params['note'] = kwargs['note'] function = 'agent/check/pass/{0}'.format(checkid) res = _query(consul_url=consul_url, function=function, token=token, query_params=query_params, method='GET') if res['res']: ret['res'] = True ret['message'] = 'Check {0} marked as passing.'.format(checkid) else: ret['res'] = False ret['message'] = 'Unable to update check {0}.'.format(checkid) return ret def agent_check_warn(consul_url=None, token=None, checkid=None, **kwargs): ''' This endpoint is used with a check that is of the TTL type. When this is called, the status of the check is set to warning and the TTL clock is reset. 
:param consul_url: The Consul server URL. :param checkid: The ID of the check to deregister from Consul. :param note: A human-readable message with the status of the check. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_check_warn checkid='redis_check1' note='Forcing check into warning state.' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not checkid: raise SaltInvocationError('Required argument "checkid" is missing.') if 'note' in kwargs: query_params['note'] = kwargs['note'] function = 'agent/check/warn/{0}'.format(checkid) res = _query(consul_url=consul_url, function=function, token=token, query_params=query_params, method='GET') if res['res']: ret['res'] = True ret['message'] = 'Check {0} marked as warning.'.format(checkid) else: ret['res'] = False ret['message'] = 'Unable to update check {0}.'.format(checkid) return ret def agent_check_fail(consul_url=None, token=None, checkid=None, **kwargs): ''' This endpoint is used with a check that is of the TTL type. When this is called, the status of the check is set to critical and the TTL clock is reset. :param consul_url: The Consul server URL. :param checkid: The ID of the check to deregister from Consul. :param note: A human-readable message with the status of the check. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_check_fail checkid='redis_check1' note='Forcing check into critical state.' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if not checkid: raise SaltInvocationError('Required argument "checkid" is missing.') if 'note' in kwargs: query_params['note'] = kwargs['note'] function = 'agent/check/fail/{0}'.format(checkid) res = _query(consul_url=consul_url, function=function, token=token, query_params=query_params, method='GET') if res['res']: ret['res'] = True ret['message'] = 'Check {0} marked as critical.'.format(checkid) else: ret['res'] = False ret['message'] = 'Unable to update check {0}.'.format(checkid) return ret def agent_service_register(consul_url=None, token=None, **kwargs): ''' The used to add a new service, with an optional health check, to the local agent. :param consul_url: The Consul server URL. :param name: A name describing the service. :param address: The address used by the service, defaults to the address of the agent. :param port: The port used by the service. :param id: Unique ID to identify the service, if not provided the value of the name parameter is used. :param tags: Identifying tags for service, string or list. :param script: If script is provided, the check type is a script, and Consul will evaluate that script based on the interval parameter. :param http: Check will perform an HTTP GET request against the value of HTTP (expected to be a URL) based on the interval parameter. :param check_ttl: If a TTL type is used, then the TTL update endpoint must be used periodically to update the state of the check. :param check_interval: Interval at which the check should run. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_service_register name='redis' tags='["master", "v1"]' address="127.0.0.1" port="8080" check_script="/usr/local/bin/check_redis.py" interval="10s" ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret lc_kwargs = dict() for k, v in six.iteritems(kwargs): lc_kwargs[k.lower()] = v if 'name' in lc_kwargs: data['Name'] = lc_kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') if 'address' in lc_kwargs: data['Address'] = lc_kwargs['address'] if 'port' in lc_kwargs: data['Port'] = lc_kwargs['port'] if 'id' in lc_kwargs: data['ID'] = lc_kwargs['id'] if 'tags' in lc_kwargs: _tags = lc_kwargs['tags'] if not isinstance(_tags, list): _tags = [_tags] data['Tags'] = _tags if 'enabletagoverride' in lc_kwargs: data['EnableTagOverride'] = lc_kwargs['enabletagoverride'] if 'check' in lc_kwargs: dd = dict() for k, v in six.iteritems(lc_kwargs['check']): dd[k.lower()] = v interval_required = False check_dd = dict() if 'script' in dd: interval_required = True check_dd['Script'] = dd['script'] if 'http' in dd: interval_required = True check_dd['HTTP'] = dd['http'] if 'ttl' in dd: check_dd['TTL'] = dd['ttl'] if 'interval' in dd: check_dd['Interval'] = dd['interval'] if interval_required: if 'Interval' not in check_dd: ret['message'] = 'Required parameter "interval" is missing.' ret['res'] = False return ret else: if 'Interval' in check_dd: del check_dd['Interval'] # not required, so ignore it if check_dd > 0: data['Check'] = check_dd # if empty, ignore it function = 'agent/service/register' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = 'Service {0} registered on agent.'.format(kwargs['name']) else: ret['res'] = False ret['message'] = 'Unable to register service {0}.'.format(kwargs['name']) return ret def agent_service_deregister(consul_url=None, token=None, serviceid=None): ''' Used to remove a service. :param consul_url: The Consul server URL. :param serviceid: A serviceid describing the service. :return: Boolean and message indicating success or failure. CLI Example: .. 
code-block:: bash salt '*' consul.agent_service_deregister serviceid='redis' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not serviceid: raise SaltInvocationError('Required argument "serviceid" is missing.') function = 'agent/service/deregister/{0}'.format(serviceid) res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = 'Service {0} removed from agent.'.format(serviceid) else: ret['res'] = False ret['message'] = 'Unable to remove service {0}.'.format(serviceid) return ret def agent_service_maintenance(consul_url=None, token=None, serviceid=None, **kwargs): ''' Used to place a service into maintenance mode. :param consul_url: The Consul server URL. :param serviceid: A name of the service. :param enable: Whether the service should be enabled or disabled. :param reason: A human readable message of why the service was enabled or disabled. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_service_deregister serviceid='redis' enable='True' reason='Down for upgrade' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not serviceid: raise SaltInvocationError('Required argument "serviceid" is missing.') if 'enable' in kwargs: query_params['enable'] = kwargs['enable'] else: ret['message'] = 'Required parameter "enable" is missing.' 
ret['res'] = False return ret if 'reason' in kwargs: query_params['reason'] = kwargs['reason'] function = 'agent/service/maintenance/{0}'.format(serviceid) res = _query(consul_url=consul_url, token=token, function=function, query_params=query_params) if res['res']: ret['res'] = True ret['message'] = ('Service {0} set in ' 'maintenance mode.'.format(serviceid)) else: ret['res'] = False ret['message'] = ('Unable to set service ' '{0} to maintenance mode.'.format(serviceid)) return ret def session_create(consul_url=None, token=None, **kwargs): ''' Used to create a session. :param consul_url: The Consul server URL. :param lockdelay: Duration string using a "s" suffix for seconds. The default is 15s. :param node: Must refer to a node that is already registered, if specified. By default, the agent's own node name is used. :param name: A human-readable name for the session :param checks: A list of associated health checks. It is highly recommended that, if you override this list, you include the default "serfHealth". :param behavior: Can be set to either release or delete. This controls the behavior when a session is invalidated. By default, this is release, causing any locks that are held to be released. Changing this to delete causes any locks that are held to be deleted. delete is useful for creating ephemeral key/value entries. :param ttl: Session is invalidated if it is not renewed before the TTL expires :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.session_create node='node1' name='my-session' behavior='delete' ttl='3600s' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret data = {} if 'lockdelay' in kwargs: data['LockDelay'] = kwargs['lockdelay'] if 'node' in kwargs: data['Node'] = kwargs['node'] if 'name' in kwargs: data['Name'] = kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') if 'checks' in kwargs: data['Touch'] = kwargs['touch'] if 'behavior' in kwargs: if not kwargs['behavior'] in ('delete', 'release'): ret['message'] = ('Behavior must be ', 'either delete or release.') ret['res'] = False return ret data['Behavior'] = kwargs['behavior'] if 'ttl' in kwargs: _ttl = kwargs['ttl'] if six.text_type(_ttl).endswith('s'): _ttl = _ttl[:-1] if int(_ttl) < 0 or int(_ttl) > 3600: ret['message'] = ('TTL must be ', 'between 0 and 3600.') ret['res'] = False return ret data['TTL'] = '{0}s'.format(_ttl) function = 'session/create' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = 'Created session {0}.'.format(kwargs['name']) else: ret['res'] = False ret['message'] = 'Unable to create session {0}.'.format(kwargs['name']) return ret def session_list(consul_url=None, token=None, return_list=False, **kwargs): ''' Used to list sessions. :param consul_url: The Consul server URL. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param return_list: By default, all information about the sessions is returned, using the return_list parameter will return a list of session IDs. :return: A list of all available sessions. CLI Example: .. code-block:: bash salt '*' consul.session_list ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret query_params = {} if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'session/list' ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) if return_list: _list = [] for item in ret['data']: _list.append(item['ID']) return _list return ret def session_destroy(consul_url=None, token=None, session=None, **kwargs): ''' Destroy session :param consul_url: The Consul server URL. :param session: The ID of the session to destroy. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.session_destroy session='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not session: raise SaltInvocationError('Required argument "session" is missing.') query_params = {} if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'session/destroy/{0}'.format(session) res = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) if res['res']: ret['res'] = True ret['message'] = 'Created Service {0}.'.format(kwargs['name']) else: ret['res'] = False ret['message'] = 'Unable to create service {0}.'.format(kwargs['name']) return ret def session_info(consul_url=None, token=None, session=None, **kwargs): ''' Information about a session :param consul_url: The Consul server URL. :param session: The ID of the session to return information about. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: Boolean & message of success or failure. CLI Example: .. 
code-block:: bash salt '*' consul.session_info session='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not session: raise SaltInvocationError('Required argument "session" is missing.') query_params = {} if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'session/info/{0}'.format(session) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def catalog_register(consul_url=None, token=None, **kwargs): ''' Registers a new node, service, or check :param consul_url: The Consul server URL. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param node: The node to register. :param address: The address of the node. :param service: The service that will be registered. :param service_address: The address that the service listens on. :param service_port: The port for the service. :param service_id: A unique identifier for the service, if this is not provided "name" will be used. :param service_tags: Any tags associated with the service. :param check: The name of the health check to register :param check_status: The initial status of the check, must be one of unknown, passing, warning, or critical. :param check_service: The service that the check is performed against. :param check_id: Unique identifier for the service. :param check_notes: An opaque field that is meant to hold human-readable text. :return: Boolean & message of success or failure. CLI Example: .. 
code-block:: bash salt '*' consul.catalog_register node='node1' address='192.168.1.1' service='redis' service_address='127.0.0.1' service_port='8080' service_id='redis_server1' ''' ret = {} data = {} data['NodeMeta'] = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'datacenter' in kwargs: data['Datacenter'] = kwargs['datacenter'] if 'node' in kwargs: data['Node'] = kwargs['node'] else: ret['message'] = 'Required argument node argument is missing.' ret['res'] = False return ret if 'address' in kwargs: if isinstance(kwargs['address'], list): _address = kwargs['address'][0] else: _address = kwargs['address'] data['Address'] = _address else: ret['message'] = 'Required argument address argument is missing.' ret['res'] = False return ret if 'ip_interfaces' in kwargs: data['TaggedAddresses'] = {} for k in kwargs['ip_interfaces']: if kwargs['ip_interfaces'].get(k): data['TaggedAddresses'][k] = kwargs['ip_interfaces'][k][0] if 'service' in kwargs: data['Service'] = {} data['Service']['Service'] = kwargs['service'] if 'service_address' in kwargs: data['Service']['Address'] = kwargs['service_address'] if 'service_port' in kwargs: data['Service']['Port'] = kwargs['service_port'] if 'service_id' in kwargs: data['Service']['ID'] = kwargs['service_id'] if 'service_tags' in kwargs: _tags = kwargs['service_tags'] if not isinstance(_tags, list): _tags = [_tags] data['Service']['Tags'] = _tags if 'cpu' in kwargs: data['NodeMeta']['Cpu'] = kwargs['cpu'] if 'num_cpus' in kwargs: data['NodeMeta']['Cpu_num'] = kwargs['num_cpus'] if 'mem' in kwargs: data['NodeMeta']['Memory'] = kwargs['mem'] if 'oscode' in kwargs: data['NodeMeta']['Os'] = kwargs['oscode'] if 'osarch' in kwargs: data['NodeMeta']['Osarch'] = kwargs['osarch'] if 'kernel' in kwargs: data['NodeMeta']['Kernel'] = kwargs['kernel'] if 'kernelrelease' in kwargs: data['NodeMeta']['Kernelrelease'] = 
kwargs['kernelrelease'] if 'localhost' in kwargs: data['NodeMeta']['localhost'] = kwargs['localhost'] if 'nodename' in kwargs: data['NodeMeta']['nodename'] = kwargs['nodename'] if 'os_family' in kwargs: data['NodeMeta']['os_family'] = kwargs['os_family'] if 'lsb_distrib_description' in kwargs: data['NodeMeta']['lsb_distrib_description'] = kwargs['lsb_distrib_description'] if 'master' in kwargs: data['NodeMeta']['master'] = kwargs['master'] if 'check' in kwargs: data['Check'] = {} data['Check']['Name'] = kwargs['check'] if 'check_status' in kwargs: if kwargs['check_status'] not in ('unknown', 'passing', 'warning', 'critical'): ret['message'] = 'Check status must be unknown, passing, warning, or critical.' ret['res'] = False return ret data['Check']['Status'] = kwargs['check_status'] if 'check_service' in kwargs: data['Check']['ServiceID'] = kwargs['check_service'] if 'check_id' in kwargs: data['Check']['CheckID'] = kwargs['check_id'] if 'check_notes' in kwargs: data['Check']['Notes'] = kwargs['check_notes'] function = 'catalog/register' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = ('Catalog registration ' 'for {0} successful.'.format(kwargs['node'])) else: ret['res'] = False ret['message'] = ('Catalog registration ' 'for {0} failed.'.format(kwargs['node'])) ret['data'] = data return ret def catalog_deregister(consul_url=None, token=None, **kwargs): ''' Deregisters a node, service, or check :param consul_url: The Consul server URL. :param node: The node to deregister. :param datacenter: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param checkid: The ID of the health check to deregister. :param serviceid: The ID of the service to deregister. :return: Boolean & message of success or failure. CLI Example: .. 
code-block:: bash salt '*' consul.catalog_register node='node1' serviceid='redis_server1' checkid='redis_check1' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'datacenter' in kwargs: data['Datacenter'] = kwargs['datacenter'] if 'node' in kwargs: data['Node'] = kwargs['node'] else: ret['message'] = 'Node argument required.' ret['res'] = False return ret if 'checkid' in kwargs: data['CheckID'] = kwargs['checkid'] if 'serviceid' in kwargs: data['ServiceID'] = kwargs['serviceid'] function = 'catalog/deregister' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = 'Catalog item {0} removed.'.format(kwargs['node']) else: ret['res'] = False ret['message'] = ('Removing Catalog ' 'item {0} failed.'.format(kwargs['node'])) return ret def catalog_datacenters(consul_url=None, token=None): ''' Return list of available datacenters from catalog. :param consul_url: The Consul server URL. :return: The list of available datacenters. CLI Example: .. code-block:: bash salt '*' consul.catalog_datacenters ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'catalog/datacenters' ret = _query(consul_url=consul_url, function=function, token=token) return ret def catalog_nodes(consul_url=None, token=None, **kwargs): ''' Return list of available nodes from catalog. :param consul_url: The Consul server URL. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: The list of available nodes. CLI Example: .. 
code-block:: bash salt '*' consul.catalog_nodes ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'catalog/nodes' ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def catalog_services(consul_url=None, token=None, **kwargs): ''' Return list of available services rom catalog. :param consul_url: The Consul server URL. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: The list of available services. CLI Example: .. code-block:: bash salt '*' consul.catalog_services ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'catalog/services' ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def catalog_service(consul_url=None, token=None, service=None, **kwargs): ''' Information about the registered service. :param consul_url: The Consul server URL. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param tag: Filter returned services with tag parameter. :return: Information about the requested service. CLI Example: .. code-block:: bash salt '*' consul.catalog_service service='redis' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if not service: raise SaltInvocationError('Required argument "service" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] if 'tag' in kwargs: query_params['tag'] = kwargs['tag'] function = 'catalog/service/{0}'.format(service) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def catalog_node(consul_url=None, token=None, node=None, **kwargs): ''' Information about the registered node. :param consul_url: The Consul server URL. :param node: The node to request information about. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: Information about the requested node. CLI Example: .. code-block:: bash salt '*' consul.catalog_service service='redis' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not node: raise SaltInvocationError('Required argument "node" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'catalog/node/{0}'.format(node) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def health_node(consul_url=None, token=None, node=None, **kwargs): ''' Health information about the registered node. :param consul_url: The Consul server URL. :param node: The node to request health information about. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: Health information about the requested node. CLI Example: .. code-block:: bash salt '*' consul.health_node node='node1' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if not node: raise SaltInvocationError('Required argument "node" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'health/node/{0}'.format(node) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def health_checks(consul_url=None, token=None, service=None, **kwargs): ''' Health information about the registered service. :param consul_url: The Consul server URL. :param service: The service to request health information about. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: Health information about the requested node. CLI Example: .. code-block:: bash salt '*' consul.health_checks service='redis1' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not service: raise SaltInvocationError('Required argument "service" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'health/checks/{0}'.format(service) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def health_service(consul_url=None, token=None, service=None, **kwargs): ''' Health information about the registered service. :param consul_url: The Consul server URL. :param service: The service to request health information about. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param tag: Filter returned services with tag parameter. :param passing: Filter results to only nodes with all checks in the passing state. :return: Health information about the requested node. CLI Example: .. 
code-block:: bash salt '*' consul.health_service service='redis1' salt '*' consul.health_service service='redis1' passing='True' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not service: raise SaltInvocationError('Required argument "service" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] if 'tag' in kwargs: query_params['tag'] = kwargs['tag'] if 'passing' in kwargs: query_params['passing'] = kwargs['passing'] function = 'health/service/{0}'.format(service) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def health_state(consul_url=None, token=None, state=None, **kwargs): ''' Returns the checks in the state provided on the path. :param consul_url: The Consul server URL. :param state: The state to show checks for. The supported states are any, unknown, passing, warning, or critical. The any state is a wildcard that can be used to return all checks. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: The checks in the provided state. CLI Example: .. code-block:: bash salt '*' consul.health_state state='redis1' salt '*' consul.health_state service='redis1' passing='True' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not state: raise SaltInvocationError('Required argument "state" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] if state not in ('any', 'unknown', 'passing', 'warning', 'critical'): ret['message'] = 'State must be any, unknown, passing, warning, or critical.' 
ret['res'] = False return ret function = 'health/state/{0}'.format(state) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def status_leader(consul_url=None, token=None): ''' Returns the current Raft leader :param consul_url: The Consul server URL. :return: The address of the Raft leader. CLI Example: .. code-block:: bash salt '*' consul.status_leader ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'status/leader' ret = _query(consul_url=consul_url, function=function, token=token) return ret def status_peers(consul_url, token=None): ''' Returns the current Raft peer set :param consul_url: The Consul server URL. :return: Retrieves the Raft peers for the datacenter in which the agent is running. CLI Example: .. code-block:: bash salt '*' consul.status_peers ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'status/peers' ret = _query(consul_url=consul_url, function=function, token=token) return ret def acl_create(consul_url=None, token=None, **kwargs): ''' Create a new ACL token. :param consul_url: The Consul server URL. :param name: Meaningful indicator of the ACL's purpose. :param type: Type is either client or management. A management token is comparable to a root user and has the ability to perform any action including creating, modifying, and deleting ACLs. :param rules: The Consul server URL. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.acl_create ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if 'name' in kwargs: data['Name'] = kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') if 'type' in kwargs: data['Type'] = kwargs['type'] if 'rules' in kwargs: data['Rules'] = kwargs['rules'] function = 'acl/create' res = _query(consul_url=consul_url, token=token, data=data, method='PUT', function=function) if res['res']: ret['res'] = True ret['message'] = 'ACL {0} created.'.format(kwargs['name']) else: ret['res'] = False ret['message'] = ('Removing Catalog ' 'item {0} failed.'.format(kwargs['name'])) return ret def acl_update(consul_url=None, token=None, **kwargs): ''' Update an ACL token. :param consul_url: The Consul server URL. :param name: Meaningful indicator of the ACL's purpose. :param id: Unique identifier for the ACL to update. :param type: Type is either client or management. A management token is comparable to a root user and has the ability to perform any action including creating, modifying, and deleting ACLs. :param rules: The Consul server URL. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.acl_update ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'id' in kwargs: data['ID'] = kwargs['id'] else: ret['message'] = 'Required parameter "id" is missing.' 
ret['res'] = False return ret if 'name' in kwargs: data['Name'] = kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') if 'type' in kwargs: data['Type'] = kwargs['type'] if 'rules' in kwargs: data['Rules'] = kwargs['rules'] function = 'acl/update' res = _query(consul_url=consul_url, token=token, data=data, method='PUT', function=function) if res['res']: ret['res'] = True ret['message'] = 'ACL {0} created.'.format(kwargs['name']) else: ret['res'] = False ret['message'] = ('Adding ACL ' '{0} failed.'.format(kwargs['name'])) return ret def acl_delete(consul_url=None, token=None, **kwargs): ''' Delete an ACL token. :param consul_url: The Consul server URL. :param id: Unique identifier for the ACL to update. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.acl_delete id='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'id' not in kwargs: ret['message'] = 'Required parameter "id" is missing.' ret['res'] = False return ret function = 'acl/delete/{0}'.format(kwargs['id']) res = _query(consul_url=consul_url, token=token, data=data, method='PUT', function=function) if res['res']: ret['res'] = True ret['message'] = 'ACL {0} deleted.'.format(kwargs['id']) else: ret['res'] = False ret['message'] = ('Removing ACL ' '{0} failed.'.format(kwargs['id'])) return ret def acl_info(consul_url=None, **kwargs): ''' Information about an ACL token. :param consul_url: The Consul server URL. :param id: Unique identifier for the ACL to update. :return: Information about the ACL requested. CLI Example: .. 
code-block:: bash salt '*' consul.acl_info id='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'id' not in kwargs: ret['message'] = 'Required parameter "id" is missing.' ret['res'] = False return ret function = 'acl/info/{0}'.format(kwargs['id']) ret = _query(consul_url=consul_url, data=data, method='GET', function=function) return ret def acl_clone(consul_url=None, token=None, **kwargs): ''' Information about an ACL token. :param consul_url: The Consul server URL. :param id: Unique identifier for the ACL to update. :return: Boolean, message of success or failure, and new ID of cloned ACL. CLI Example: .. code-block:: bash salt '*' consul.acl_info id='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'id' not in kwargs: ret['message'] = 'Required parameter "id" is missing.' ret['res'] = False return ret function = 'acl/clone/{0}'.format(kwargs['id']) res = _query(consul_url=consul_url, token=token, data=data, method='PUT', function=function) if res['res']: ret['res'] = True ret['message'] = 'ACL {0} cloned.'.format(kwargs['name']) ret['ID'] = ret['data'] else: ret['res'] = False ret['message'] = ('Cloning ACL' 'item {0} failed.'.format(kwargs['name'])) return ret def acl_list(consul_url=None, token=None, **kwargs): ''' List the ACL tokens. :param consul_url: The Consul server URL. :return: List of ACLs CLI Example: .. code-block:: bash salt '*' consul.acl_list ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if 'id' not in kwargs: ret['message'] = 'Required parameter "id" is missing.' ret['res'] = False return ret function = 'acl/list' ret = _query(consul_url=consul_url, token=token, data=data, method='PUT', function=function) return ret def event_fire(consul_url=None, token=None, name=None, **kwargs): ''' List the ACL tokens. :param consul_url: The Consul server URL. :param name: The name of the event to fire. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param node: Filter by node name. :param service: Filter by service name. :param tag: Filter by tag name. :return: List of ACLs CLI Example: .. code-block:: bash salt '*' consul.event_fire name='deploy' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not name: raise SaltInvocationError('Required argument "name" is missing.') if 'dc' in kwargs: query_params = kwargs['dc'] if 'node' in kwargs: query_params = kwargs['node'] if 'service' in kwargs: query_params = kwargs['service'] if 'tag' in kwargs: query_params = kwargs['tag'] function = 'event/fire/{0}'.format(name) res = _query(consul_url=consul_url, token=token, query_params=query_params, method='PUT', function=function) if res['res']: ret['res'] = True ret['message'] = 'Event {0} fired.'.format(name) ret['data'] = ret['data'] else: ret['res'] = False ret['message'] = ('Cloning ACL' 'item {0} failed.'.format(kwargs['name'])) return ret def event_list(consul_url=None, token=None, **kwargs): ''' List the recent events. :param consul_url: The Consul server URL. :param name: The name of the event to fire. :return: List of ACLs CLI Example: .. 
code-block:: bash salt '*' consul.event_list ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'name' in kwargs: query_params = kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') function = 'event/list/' ret = _query(consul_url=consul_url, token=token, query_params=query_params, function=function) return ret
saltstack/salt
salt/modules/consul.py
put
python
def put(consul_url=None, token=None, key=None, value=None, **kwargs): ''' Put values into Consul :param consul_url: The Consul server URL. :param key: The key to use as the starting point for the list. :param value: The value to set the key to. :param flags: This can be used to specify an unsigned value between 0 and 2^64-1. Clients can choose to use this however makes sense for their application. :param cas: This flag is used to turn the PUT into a Check-And-Set operation. :param acquire: This flag is used to turn the PUT into a lock acquisition operation. :param release: This flag is used to turn the PUT into a lock release operation. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.put key='web/key1' value="Hello there" salt '*' consul.put key='web/key1' value="Hello there" acquire='d5d371f4-c380-5280-12fd-8810be175592' salt '*' consul.put key='web/key1' value="Hello there" release='d5d371f4-c380-5280-12fd-8810be175592' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if not key: raise SaltInvocationError('Required argument "key" is missing.') # Invalid to specified these together conflicting_args = ['cas', 'release', 'acquire'] for _l1 in conflicting_args: for _l2 in conflicting_args: if _l1 in kwargs and _l2 in kwargs and _l1 != _l2: raise SaltInvocationError('Using arguments `{0}` and `{1}`' ' together is invalid.'.format(_l1, _l2)) query_params = {} available_sessions = session_list(consul_url=consul_url, return_list=True) _current = get(consul_url=consul_url, key=key) if 'flags' in kwargs: if kwargs['flags'] >= 0 and kwargs['flags'] <= 2**64: query_params['flags'] = kwargs['flags'] if 'cas' in kwargs: if _current['res']: if kwargs['cas'] == 0: ret['message'] = ('Key {0} exists, index ' 'must be non-zero.'.format(key)) ret['res'] = False return ret if kwargs['cas'] != _current['data']['ModifyIndex']: ret['message'] = ('Key {0} exists, but indexes ' 'do not match.'.format(key)) ret['res'] = False return ret query_params['cas'] = kwargs['cas'] else: ret['message'] = ('Key {0} does not exists, ' 'CAS argument can not be used.'.format(key)) ret['res'] = False return ret if 'acquire' in kwargs: if kwargs['acquire'] not in available_sessions: ret['message'] = '{0} is not a valid session.'.format(kwargs['acquire']) ret['res'] = False return ret query_params['acquire'] = kwargs['acquire'] if 'release' in kwargs: if _current['res']: if 'Session' in _current['data']: if _current['data']['Session'] == kwargs['release']: query_params['release'] = kwargs['release'] else: ret['message'] = '{0} locked by another session.'.format(key) ret['res'] = False return ret else: ret['message'] = '{0} is not a valid session.'.format(kwargs['acquire']) ret['res'] = False else: log.error('Key {0} does not exist. 
Skipping release.') data = value function = 'kv/{0}'.format(key) method = 'PUT' ret = _query(consul_url=consul_url, token=token, function=function, method=method, data=data, query_params=query_params) if ret['res']: ret['res'] = True ret['data'] = 'Added key {0} with value {1}.'.format(key, value) else: ret['res'] = False ret['data'] = 'Unable to add key {0} with value {1}.'.format(key, value) return ret
Put values into Consul :param consul_url: The Consul server URL. :param key: The key to use as the starting point for the list. :param value: The value to set the key to. :param flags: This can be used to specify an unsigned value between 0 and 2^64-1. Clients can choose to use this however makes sense for their application. :param cas: This flag is used to turn the PUT into a Check-And-Set operation. :param acquire: This flag is used to turn the PUT into a lock acquisition operation. :param release: This flag is used to turn the PUT into a lock release operation. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.put key='web/key1' value="Hello there" salt '*' consul.put key='web/key1' value="Hello there" acquire='d5d371f4-c380-5280-12fd-8810be175592' salt '*' consul.put key='web/key1' value="Hello there" release='d5d371f4-c380-5280-12fd-8810be175592'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/consul.py#L227-L345
[ "def get(consul_url=None, key=None, token=None, recurse=False, decode=False, raw=False):\n '''\n Get key from Consul\n\n :param consul_url: The Consul server URL.\n :param key: The key to use as the starting point for the list.\n :param recurse: Return values recursively beginning at the value of key.\n :param decode: By default values are stored as Base64 encoded values,\n decode will return the whole key with the value decoded.\n :param raw: Simply return the decoded value of the key.\n :return: The keys in Consul.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' consul.get key='web/key1'\n salt '*' consul.get key='web' recurse=True\n salt '*' consul.get key='web' recurse=True decode=True\n\n By default values stored in Consul are base64 encoded, passing the\n decode option will show them as the decoded values.\n\n .. code-block:: bash\n\n salt '*' consul.get key='web' recurse=True decode=True raw=True\n\n By default Consult will return other information about the key, the raw\n option will return only the raw value.\n\n '''\n ret = {}\n if not consul_url:\n consul_url = _get_config()\n if not consul_url:\n log.error('No Consul URL found.')\n ret['message'] = 'No Consul URL found.'\n ret['res'] = False\n return ret\n\n if not key:\n raise SaltInvocationError('Required argument \"key\" is missing.')\n\n query_params = {}\n function = 'kv/{0}'.format(key)\n if recurse:\n query_params['recurse'] = 'True'\n if raw:\n query_params['raw'] = True\n ret = _query(consul_url=consul_url,\n function=function,\n token=token,\n query_params=query_params)\n\n if ret['res']:\n if decode:\n for item in ret['data']:\n if item['Value'] is None:\n item['Value'] = \"\"\n else:\n item['Value'] = base64.b64decode(item['Value'])\n return ret\n", "def _query(function,\n consul_url,\n token=None,\n method='GET',\n api_version='v1',\n data=None,\n query_params=None):\n '''\n Consul object method function to construct and execute on the API URL.\n\n :param api_url: The Consul api 
url.\n :param api_version The Consul api version\n :param function: The Consul api function to perform.\n :param method: The HTTP method, e.g. GET or POST.\n :param data: The data to be sent for POST method. This param is ignored for GET requests.\n :return: The json response from the API call or False.\n '''\n\n if not query_params:\n query_params = {}\n\n ret = {'data': '',\n 'res': True}\n\n if not token:\n token = _get_token()\n\n headers = {\"X-Consul-Token\": token, \"Content-Type\": \"application/json\"}\n base_url = urllib.parse.urljoin(consul_url, '{0}/'.format(api_version))\n url = urllib.parse.urljoin(base_url, function, False)\n\n if method == 'GET':\n data = None\n else:\n if data is None:\n data = {}\n data = salt.utils.json.dumps(data)\n\n result = salt.utils.http.query(\n url,\n method=method,\n params=query_params,\n data=data,\n decode=True,\n status=True,\n header_dict=headers,\n opts=__opts__,\n )\n\n if result.get('status', None) == http_client.OK:\n ret['data'] = result.get('dict', result)\n ret['res'] = True\n elif result.get('status', None) == http_client.NO_CONTENT:\n ret['res'] = False\n elif result.get('status', None) == http_client.NOT_FOUND:\n ret['data'] = 'Key not found.'\n ret['res'] = False\n else:\n if result:\n ret['data'] = result\n ret['res'] = True\n else:\n ret['res'] = False\n return ret\n", "def _get_config():\n '''\n Retrieve Consul configuration\n '''\n return __salt__['config.get']('consul.url') or \\\n __salt__['config.get']('consul:url')\n", "def session_list(consul_url=None, token=None, return_list=False, **kwargs):\n '''\n Used to list sessions.\n\n :param consul_url: The Consul server URL.\n :param dc: By default, the datacenter of the agent is queried;\n however, the dc can be provided using the \"dc\" parameter.\n :param return_list: By default, all information about the sessions is\n returned, using the return_list parameter will return\n a list of session IDs.\n :return: A list of all available sessions.\n\n CLI 
Example:\n\n .. code-block:: bash\n\n salt '*' consul.session_list\n\n '''\n ret = {}\n if not consul_url:\n consul_url = _get_config()\n if not consul_url:\n log.error('No Consul URL found.')\n ret['message'] = 'No Consul URL found.'\n ret['res'] = False\n return ret\n\n query_params = {}\n\n if 'dc' in kwargs:\n query_params['dc'] = kwargs['dc']\n\n function = 'session/list'\n ret = _query(consul_url=consul_url,\n function=function,\n token=token,\n query_params=query_params)\n\n if return_list:\n _list = []\n for item in ret['data']:\n _list.append(item['ID'])\n return _list\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Interact with Consul https://www.consul.io ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import base64 import logging # Import salt libs import salt.utils.http import salt.utils.json # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import http_client, urllib log = logging.getLogger(__name__) from salt.exceptions import SaltInvocationError # Don't shadow built-ins. __func_alias__ = { 'list_': 'list' } __virtualname__ = 'consul' def _get_config(): ''' Retrieve Consul configuration ''' return __salt__['config.get']('consul.url') or \ __salt__['config.get']('consul:url') def _get_token(): ''' Retrieve Consul configuration ''' return __salt__['config.get']('consul.token') or \ __salt__['config.get']('consul:token') def _query(function, consul_url, token=None, method='GET', api_version='v1', data=None, query_params=None): ''' Consul object method function to construct and execute on the API URL. :param api_url: The Consul api url. :param api_version The Consul api version :param function: The Consul api function to perform. :param method: The HTTP method, e.g. GET or POST. :param data: The data to be sent for POST method. This param is ignored for GET requests. :return: The json response from the API call or False. 
''' if not query_params: query_params = {} ret = {'data': '', 'res': True} if not token: token = _get_token() headers = {"X-Consul-Token": token, "Content-Type": "application/json"} base_url = urllib.parse.urljoin(consul_url, '{0}/'.format(api_version)) url = urllib.parse.urljoin(base_url, function, False) if method == 'GET': data = None else: if data is None: data = {} data = salt.utils.json.dumps(data) result = salt.utils.http.query( url, method=method, params=query_params, data=data, decode=True, status=True, header_dict=headers, opts=__opts__, ) if result.get('status', None) == http_client.OK: ret['data'] = result.get('dict', result) ret['res'] = True elif result.get('status', None) == http_client.NO_CONTENT: ret['res'] = False elif result.get('status', None) == http_client.NOT_FOUND: ret['data'] = 'Key not found.' ret['res'] = False else: if result: ret['data'] = result ret['res'] = True else: ret['res'] = False return ret def list_(consul_url=None, token=None, key=None, **kwargs): ''' List keys in Consul :param consul_url: The Consul server URL. :param key: The key to use as the starting point for the list. :return: The list of keys. CLI Example: .. code-block:: bash salt '*' consul.list salt '*' consul.list key='web' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret query_params = {} if 'recurse' in kwargs: query_params['recurse'] = 'True' # No key so recurse and show all values if not key: query_params['recurse'] = 'True' function = 'kv/' else: function = 'kv/{0}'.format(key) query_params['keys'] = 'True' query_params['separator'] = '/' ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def get(consul_url=None, key=None, token=None, recurse=False, decode=False, raw=False): ''' Get key from Consul :param consul_url: The Consul server URL. 
:param key: The key to use as the starting point for the list. :param recurse: Return values recursively beginning at the value of key. :param decode: By default values are stored as Base64 encoded values, decode will return the whole key with the value decoded. :param raw: Simply return the decoded value of the key. :return: The keys in Consul. CLI Example: .. code-block:: bash salt '*' consul.get key='web/key1' salt '*' consul.get key='web' recurse=True salt '*' consul.get key='web' recurse=True decode=True By default values stored in Consul are base64 encoded, passing the decode option will show them as the decoded values. .. code-block:: bash salt '*' consul.get key='web' recurse=True decode=True raw=True By default Consult will return other information about the key, the raw option will return only the raw value. ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not key: raise SaltInvocationError('Required argument "key" is missing.') query_params = {} function = 'kv/{0}'.format(key) if recurse: query_params['recurse'] = 'True' if raw: query_params['raw'] = True ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) if ret['res']: if decode: for item in ret['data']: if item['Value'] is None: item['Value'] = "" else: item['Value'] = base64.b64decode(item['Value']) return ret def delete(consul_url=None, token=None, key=None, **kwargs): ''' Delete values from Consul :param consul_url: The Consul server URL. :param key: The key to use as the starting point for the list. :param recurse: Delete values recursively beginning at the value of key. :param cas: This flag is used to turn the DELETE into a Check-And-Set operation. :return: Boolean & message of success or failure. CLI Example: .. 
code-block:: bash salt '*' consul.delete key='web' salt '*' consul.delete key='web' recurse='True' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not key: raise SaltInvocationError('Required argument "key" is missing.') query_params = {} if 'recurse' in kwargs: query_params['recurse'] = True if 'cas' in kwargs: if kwargs['cas'] > 0: query_params['cas'] = kwargs['cas'] else: ret['message'] = ('Check and Set Operation ', 'value must be greater than 0.') ret['res'] = False return ret function = 'kv/{0}'.format(key) ret = _query(consul_url=consul_url, token=token, function=function, method='DELETE', query_params=query_params) if ret['res']: ret['res'] = True ret['message'] = 'Deleted key {0}.'.format(key) else: ret['res'] = False ret['message'] = 'Unable to delete key {0}.'.format(key) return ret def agent_checks(consul_url=None, token=None): ''' Returns the checks the local agent is managing :param consul_url: The Consul server URL. :return: Returns the checks the local agent is managing CLI Example: .. code-block:: bash salt '*' consul.agent_checks ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'agent/checks' ret = _query(consul_url=consul_url, function=function, token=token, method='GET') return ret def agent_services(consul_url=None, token=None): ''' Returns the services the local agent is managing :param consul_url: The Consul server URL. :return: Returns the services the local agent is managing CLI Example: .. code-block:: bash salt '*' consul.agent_services ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret function = 'agent/services' ret = _query(consul_url=consul_url, function=function, token=token, method='GET') return ret def agent_members(consul_url=None, token=None, **kwargs): ''' Returns the members as seen by the local serf agent :param consul_url: The Consul server URL. :return: Returns the members as seen by the local serf agent CLI Example: .. code-block:: bash salt '*' consul.agent_members ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'wan' in kwargs: query_params['wan'] = kwargs['wan'] function = 'agent/members' ret = _query(consul_url=consul_url, function=function, token=token, method='GET', query_params=query_params) return ret def agent_self(consul_url=None, token=None): ''' Returns the local node configuration :param consul_url: The Consul server URL. :return: Returns the local node configuration CLI Example: .. code-block:: bash salt '*' consul.agent_self ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'agent/self' ret = _query(consul_url=consul_url, function=function, token=token, method='GET', query_params=query_params) return ret def agent_maintenance(consul_url=None, token=None, **kwargs): ''' Manages node maintenance mode :param consul_url: The Consul server URL. :param enable: The enable flag is required. Acceptable values are either true (to enter maintenance mode) or false (to resume normal operation). :param reason: If provided, its value should be a text string explaining the reason for placing the node into maintenance mode. :return: Boolean and message indicating success or failure. CLI Example: .. 
code-block:: bash salt '*' consul.agent_maintenance enable='False' reason='Upgrade in progress' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'enable' in kwargs: query_params['enable'] = kwargs['enable'] else: ret['message'] = 'Required parameter "enable" is missing.' ret['res'] = False return ret if 'reason' in kwargs: query_params['reason'] = kwargs['reason'] function = 'agent/maintenance' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', query_params=query_params) if res['res']: ret['res'] = True ret['message'] = ('Agent maintenance mode ' '{0}ed.'.format(kwargs['enable'])) else: ret['res'] = True ret['message'] = 'Unable to change maintenance mode for agent.' return ret def agent_join(consul_url=None, token=None, address=None, **kwargs): ''' Triggers the local agent to join a node :param consul_url: The Consul server URL. :param address: The address for the agent to connect to. :param wan: Causes the agent to attempt to join using the WAN pool. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_join address='192.168.1.1' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not address: raise SaltInvocationError('Required argument "address" is missing.') if 'wan' in kwargs: query_params['wan'] = kwargs['wan'] function = 'agent/join/{0}'.format(address) res = _query(consul_url=consul_url, function=function, token=token, method='GET', query_params=query_params) if res['res']: ret['res'] = True ret['message'] = 'Agent joined the cluster' else: ret['res'] = False ret['message'] = 'Unable to join the cluster.' 
return ret def agent_leave(consul_url=None, token=None, node=None): ''' Used to instruct the agent to force a node into the left state. :param consul_url: The Consul server URL. :param node: The node the agent will force into left state :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_leave node='web1.example.com' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not node: raise SaltInvocationError('Required argument "node" is missing.') function = 'agent/force-leave/{0}'.format(node) res = _query(consul_url=consul_url, function=function, token=token, method='GET', query_params=query_params) if res['res']: ret['res'] = True ret['message'] = 'Node {0} put in leave state.'.format(node) else: ret['res'] = False ret['message'] = 'Unable to change state for {0}.'.format(node) return ret def agent_check_register(consul_url=None, token=None, **kwargs): ''' The register endpoint is used to add a new check to the local agent. :param consul_url: The Consul server URL. :param name: The description of what the check is for. :param id: The unique name to use for the check, if not provided 'name' is used. :param notes: Human readable description of the check. :param script: If script is provided, the check type is a script, and Consul will evaluate that script based on the interval parameter. :param http: Check will perform an HTTP GET request against the value of HTTP (expected to be a URL) based on the interval parameter. :param ttl: If a TTL type is used, then the TTL update endpoint must be used periodically to update the state of the check. :param interval: Interval at which the check should run. :return: Boolean and message indicating success or failure. CLI Example: .. 
code-block:: bash salt '*' consul.agent_check_register name='Memory Utilization' script='/usr/local/bin/check_mem.py' interval='15s' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'name' in kwargs: data['Name'] = kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') if True not in [True for item in ('script', 'http', 'ttl') if item in kwargs]: ret['message'] = 'Required parameter "script" or "http" is missing.' ret['res'] = False return ret if 'id' in kwargs: data['ID'] = kwargs['id'] if 'notes' in kwargs: data['Notes'] = kwargs['notes'] if 'script' in kwargs: if 'interval' not in kwargs: ret['message'] = 'Required parameter "interval" is missing.' ret['res'] = False return ret data['Script'] = kwargs['script'] data['Interval'] = kwargs['interval'] if 'http' in kwargs: if 'interval' not in kwargs: ret['message'] = 'Required parameter "interval" is missing.' ret['res'] = False return ret data['HTTP'] = kwargs['http'] data['Interval'] = kwargs['interval'] if 'ttl' in kwargs: data['TTL'] = kwargs['ttl'] function = 'agent/check/register' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = ('Check {0} added to agent.'.format(kwargs['name'])) else: ret['res'] = False ret['message'] = 'Unable to add check to agent.' return ret def agent_check_deregister(consul_url=None, token=None, checkid=None): ''' The agent will take care of deregistering the check from the Catalog. :param consul_url: The Consul server URL. :param checkid: The ID of the check to deregister from Consul. :return: Boolean and message indicating success or failure. CLI Example: .. 
code-block:: bash salt '*' consul.agent_check_deregister checkid='Memory Utilization' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not checkid: raise SaltInvocationError('Required argument "checkid" is missing.') function = 'agent/check/deregister/{0}'.format(checkid) res = _query(consul_url=consul_url, function=function, token=token, method='GET') if res['res']: ret['res'] = True ret['message'] = ('Check {0} removed from agent.'.format(checkid)) else: ret['res'] = False ret['message'] = 'Unable to remove check from agent.' return ret def agent_check_pass(consul_url=None, token=None, checkid=None, **kwargs): ''' This endpoint is used with a check that is of the TTL type. When this is called, the status of the check is set to passing and the TTL clock is reset. :param consul_url: The Consul server URL. :param checkid: The ID of the check to mark as passing. :param note: A human-readable message with the status of the check. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_check_pass checkid='redis_check1' note='Forcing check into passing state.' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if not checkid: raise SaltInvocationError('Required argument "checkid" is missing.') if 'note' in kwargs: query_params['note'] = kwargs['note'] function = 'agent/check/pass/{0}'.format(checkid) res = _query(consul_url=consul_url, function=function, token=token, query_params=query_params, method='GET') if res['res']: ret['res'] = True ret['message'] = 'Check {0} marked as passing.'.format(checkid) else: ret['res'] = False ret['message'] = 'Unable to update check {0}.'.format(checkid) return ret def agent_check_warn(consul_url=None, token=None, checkid=None, **kwargs): ''' This endpoint is used with a check that is of the TTL type. When this is called, the status of the check is set to warning and the TTL clock is reset. :param consul_url: The Consul server URL. :param checkid: The ID of the check to deregister from Consul. :param note: A human-readable message with the status of the check. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_check_warn checkid='redis_check1' note='Forcing check into warning state.' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not checkid: raise SaltInvocationError('Required argument "checkid" is missing.') if 'note' in kwargs: query_params['note'] = kwargs['note'] function = 'agent/check/warn/{0}'.format(checkid) res = _query(consul_url=consul_url, function=function, token=token, query_params=query_params, method='GET') if res['res']: ret['res'] = True ret['message'] = 'Check {0} marked as warning.'.format(checkid) else: ret['res'] = False ret['message'] = 'Unable to update check {0}.'.format(checkid) return ret def agent_check_fail(consul_url=None, token=None, checkid=None, **kwargs): ''' This endpoint is used with a check that is of the TTL type. 
When this is called, the status of the check is set to critical and the TTL clock is reset. :param consul_url: The Consul server URL. :param checkid: The ID of the check to deregister from Consul. :param note: A human-readable message with the status of the check. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_check_fail checkid='redis_check1' note='Forcing check into critical state.' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not checkid: raise SaltInvocationError('Required argument "checkid" is missing.') if 'note' in kwargs: query_params['note'] = kwargs['note'] function = 'agent/check/fail/{0}'.format(checkid) res = _query(consul_url=consul_url, function=function, token=token, query_params=query_params, method='GET') if res['res']: ret['res'] = True ret['message'] = 'Check {0} marked as critical.'.format(checkid) else: ret['res'] = False ret['message'] = 'Unable to update check {0}.'.format(checkid) return ret def agent_service_register(consul_url=None, token=None, **kwargs): ''' The used to add a new service, with an optional health check, to the local agent. :param consul_url: The Consul server URL. :param name: A name describing the service. :param address: The address used by the service, defaults to the address of the agent. :param port: The port used by the service. :param id: Unique ID to identify the service, if not provided the value of the name parameter is used. :param tags: Identifying tags for service, string or list. :param script: If script is provided, the check type is a script, and Consul will evaluate that script based on the interval parameter. :param http: Check will perform an HTTP GET request against the value of HTTP (expected to be a URL) based on the interval parameter. 
:param check_ttl: If a TTL type is used, then the TTL update endpoint must be used periodically to update the state of the check. :param check_interval: Interval at which the check should run. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_service_register name='redis' tags='["master", "v1"]' address="127.0.0.1" port="8080" check_script="/usr/local/bin/check_redis.py" interval="10s" ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret lc_kwargs = dict() for k, v in six.iteritems(kwargs): lc_kwargs[k.lower()] = v if 'name' in lc_kwargs: data['Name'] = lc_kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') if 'address' in lc_kwargs: data['Address'] = lc_kwargs['address'] if 'port' in lc_kwargs: data['Port'] = lc_kwargs['port'] if 'id' in lc_kwargs: data['ID'] = lc_kwargs['id'] if 'tags' in lc_kwargs: _tags = lc_kwargs['tags'] if not isinstance(_tags, list): _tags = [_tags] data['Tags'] = _tags if 'enabletagoverride' in lc_kwargs: data['EnableTagOverride'] = lc_kwargs['enabletagoverride'] if 'check' in lc_kwargs: dd = dict() for k, v in six.iteritems(lc_kwargs['check']): dd[k.lower()] = v interval_required = False check_dd = dict() if 'script' in dd: interval_required = True check_dd['Script'] = dd['script'] if 'http' in dd: interval_required = True check_dd['HTTP'] = dd['http'] if 'ttl' in dd: check_dd['TTL'] = dd['ttl'] if 'interval' in dd: check_dd['Interval'] = dd['interval'] if interval_required: if 'Interval' not in check_dd: ret['message'] = 'Required parameter "interval" is missing.' 
ret['res'] = False return ret else: if 'Interval' in check_dd: del check_dd['Interval'] # not required, so ignore it if check_dd > 0: data['Check'] = check_dd # if empty, ignore it function = 'agent/service/register' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = 'Service {0} registered on agent.'.format(kwargs['name']) else: ret['res'] = False ret['message'] = 'Unable to register service {0}.'.format(kwargs['name']) return ret def agent_service_deregister(consul_url=None, token=None, serviceid=None): ''' Used to remove a service. :param consul_url: The Consul server URL. :param serviceid: A serviceid describing the service. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_service_deregister serviceid='redis' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not serviceid: raise SaltInvocationError('Required argument "serviceid" is missing.') function = 'agent/service/deregister/{0}'.format(serviceid) res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = 'Service {0} removed from agent.'.format(serviceid) else: ret['res'] = False ret['message'] = 'Unable to remove service {0}.'.format(serviceid) return ret def agent_service_maintenance(consul_url=None, token=None, serviceid=None, **kwargs): ''' Used to place a service into maintenance mode. :param consul_url: The Consul server URL. :param serviceid: A name of the service. :param enable: Whether the service should be enabled or disabled. :param reason: A human readable message of why the service was enabled or disabled. :return: Boolean and message indicating success or failure. CLI Example: .. 
code-block:: bash salt '*' consul.agent_service_deregister serviceid='redis' enable='True' reason='Down for upgrade' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not serviceid: raise SaltInvocationError('Required argument "serviceid" is missing.') if 'enable' in kwargs: query_params['enable'] = kwargs['enable'] else: ret['message'] = 'Required parameter "enable" is missing.' ret['res'] = False return ret if 'reason' in kwargs: query_params['reason'] = kwargs['reason'] function = 'agent/service/maintenance/{0}'.format(serviceid) res = _query(consul_url=consul_url, token=token, function=function, query_params=query_params) if res['res']: ret['res'] = True ret['message'] = ('Service {0} set in ' 'maintenance mode.'.format(serviceid)) else: ret['res'] = False ret['message'] = ('Unable to set service ' '{0} to maintenance mode.'.format(serviceid)) return ret def session_create(consul_url=None, token=None, **kwargs): ''' Used to create a session. :param consul_url: The Consul server URL. :param lockdelay: Duration string using a "s" suffix for seconds. The default is 15s. :param node: Must refer to a node that is already registered, if specified. By default, the agent's own node name is used. :param name: A human-readable name for the session :param checks: A list of associated health checks. It is highly recommended that, if you override this list, you include the default "serfHealth". :param behavior: Can be set to either release or delete. This controls the behavior when a session is invalidated. By default, this is release, causing any locks that are held to be released. Changing this to delete causes any locks that are held to be deleted. delete is useful for creating ephemeral key/value entries. 
:param ttl: Session is invalidated if it is not renewed before the TTL expires :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.session_create node='node1' name='my-session' behavior='delete' ttl='3600s' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret data = {} if 'lockdelay' in kwargs: data['LockDelay'] = kwargs['lockdelay'] if 'node' in kwargs: data['Node'] = kwargs['node'] if 'name' in kwargs: data['Name'] = kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') if 'checks' in kwargs: data['Touch'] = kwargs['touch'] if 'behavior' in kwargs: if not kwargs['behavior'] in ('delete', 'release'): ret['message'] = ('Behavior must be ', 'either delete or release.') ret['res'] = False return ret data['Behavior'] = kwargs['behavior'] if 'ttl' in kwargs: _ttl = kwargs['ttl'] if six.text_type(_ttl).endswith('s'): _ttl = _ttl[:-1] if int(_ttl) < 0 or int(_ttl) > 3600: ret['message'] = ('TTL must be ', 'between 0 and 3600.') ret['res'] = False return ret data['TTL'] = '{0}s'.format(_ttl) function = 'session/create' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = 'Created session {0}.'.format(kwargs['name']) else: ret['res'] = False ret['message'] = 'Unable to create session {0}.'.format(kwargs['name']) return ret def session_list(consul_url=None, token=None, return_list=False, **kwargs): ''' Used to list sessions. :param consul_url: The Consul server URL. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param return_list: By default, all information about the sessions is returned, using the return_list parameter will return a list of session IDs. 
:return: A list of all available sessions. CLI Example: .. code-block:: bash salt '*' consul.session_list ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret query_params = {} if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'session/list' ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) if return_list: _list = [] for item in ret['data']: _list.append(item['ID']) return _list return ret def session_destroy(consul_url=None, token=None, session=None, **kwargs): ''' Destroy session :param consul_url: The Consul server URL. :param session: The ID of the session to destroy. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.session_destroy session='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not session: raise SaltInvocationError('Required argument "session" is missing.') query_params = {} if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'session/destroy/{0}'.format(session) res = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) if res['res']: ret['res'] = True ret['message'] = 'Created Service {0}.'.format(kwargs['name']) else: ret['res'] = False ret['message'] = 'Unable to create service {0}.'.format(kwargs['name']) return ret def session_info(consul_url=None, token=None, session=None, **kwargs): ''' Information about a session :param consul_url: The Consul server URL. :param session: The ID of the session to return information about. 
:param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.session_info session='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not session: raise SaltInvocationError('Required argument "session" is missing.') query_params = {} if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'session/info/{0}'.format(session) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def catalog_register(consul_url=None, token=None, **kwargs): ''' Registers a new node, service, or check :param consul_url: The Consul server URL. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param node: The node to register. :param address: The address of the node. :param service: The service that will be registered. :param service_address: The address that the service listens on. :param service_port: The port for the service. :param service_id: A unique identifier for the service, if this is not provided "name" will be used. :param service_tags: Any tags associated with the service. :param check: The name of the health check to register :param check_status: The initial status of the check, must be one of unknown, passing, warning, or critical. :param check_service: The service that the check is performed against. :param check_id: Unique identifier for the service. :param check_notes: An opaque field that is meant to hold human-readable text. :return: Boolean & message of success or failure. CLI Example: .. 
code-block:: bash salt '*' consul.catalog_register node='node1' address='192.168.1.1' service='redis' service_address='127.0.0.1' service_port='8080' service_id='redis_server1' ''' ret = {} data = {} data['NodeMeta'] = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'datacenter' in kwargs: data['Datacenter'] = kwargs['datacenter'] if 'node' in kwargs: data['Node'] = kwargs['node'] else: ret['message'] = 'Required argument node argument is missing.' ret['res'] = False return ret if 'address' in kwargs: if isinstance(kwargs['address'], list): _address = kwargs['address'][0] else: _address = kwargs['address'] data['Address'] = _address else: ret['message'] = 'Required argument address argument is missing.' ret['res'] = False return ret if 'ip_interfaces' in kwargs: data['TaggedAddresses'] = {} for k in kwargs['ip_interfaces']: if kwargs['ip_interfaces'].get(k): data['TaggedAddresses'][k] = kwargs['ip_interfaces'][k][0] if 'service' in kwargs: data['Service'] = {} data['Service']['Service'] = kwargs['service'] if 'service_address' in kwargs: data['Service']['Address'] = kwargs['service_address'] if 'service_port' in kwargs: data['Service']['Port'] = kwargs['service_port'] if 'service_id' in kwargs: data['Service']['ID'] = kwargs['service_id'] if 'service_tags' in kwargs: _tags = kwargs['service_tags'] if not isinstance(_tags, list): _tags = [_tags] data['Service']['Tags'] = _tags if 'cpu' in kwargs: data['NodeMeta']['Cpu'] = kwargs['cpu'] if 'num_cpus' in kwargs: data['NodeMeta']['Cpu_num'] = kwargs['num_cpus'] if 'mem' in kwargs: data['NodeMeta']['Memory'] = kwargs['mem'] if 'oscode' in kwargs: data['NodeMeta']['Os'] = kwargs['oscode'] if 'osarch' in kwargs: data['NodeMeta']['Osarch'] = kwargs['osarch'] if 'kernel' in kwargs: data['NodeMeta']['Kernel'] = kwargs['kernel'] if 'kernelrelease' in kwargs: data['NodeMeta']['Kernelrelease'] = 
kwargs['kernelrelease'] if 'localhost' in kwargs: data['NodeMeta']['localhost'] = kwargs['localhost'] if 'nodename' in kwargs: data['NodeMeta']['nodename'] = kwargs['nodename'] if 'os_family' in kwargs: data['NodeMeta']['os_family'] = kwargs['os_family'] if 'lsb_distrib_description' in kwargs: data['NodeMeta']['lsb_distrib_description'] = kwargs['lsb_distrib_description'] if 'master' in kwargs: data['NodeMeta']['master'] = kwargs['master'] if 'check' in kwargs: data['Check'] = {} data['Check']['Name'] = kwargs['check'] if 'check_status' in kwargs: if kwargs['check_status'] not in ('unknown', 'passing', 'warning', 'critical'): ret['message'] = 'Check status must be unknown, passing, warning, or critical.' ret['res'] = False return ret data['Check']['Status'] = kwargs['check_status'] if 'check_service' in kwargs: data['Check']['ServiceID'] = kwargs['check_service'] if 'check_id' in kwargs: data['Check']['CheckID'] = kwargs['check_id'] if 'check_notes' in kwargs: data['Check']['Notes'] = kwargs['check_notes'] function = 'catalog/register' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = ('Catalog registration ' 'for {0} successful.'.format(kwargs['node'])) else: ret['res'] = False ret['message'] = ('Catalog registration ' 'for {0} failed.'.format(kwargs['node'])) ret['data'] = data return ret def catalog_deregister(consul_url=None, token=None, **kwargs): ''' Deregisters a node, service, or check :param consul_url: The Consul server URL. :param node: The node to deregister. :param datacenter: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param checkid: The ID of the health check to deregister. :param serviceid: The ID of the service to deregister. :return: Boolean & message of success or failure. CLI Example: .. 
code-block:: bash salt '*' consul.catalog_register node='node1' serviceid='redis_server1' checkid='redis_check1' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'datacenter' in kwargs: data['Datacenter'] = kwargs['datacenter'] if 'node' in kwargs: data['Node'] = kwargs['node'] else: ret['message'] = 'Node argument required.' ret['res'] = False return ret if 'checkid' in kwargs: data['CheckID'] = kwargs['checkid'] if 'serviceid' in kwargs: data['ServiceID'] = kwargs['serviceid'] function = 'catalog/deregister' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = 'Catalog item {0} removed.'.format(kwargs['node']) else: ret['res'] = False ret['message'] = ('Removing Catalog ' 'item {0} failed.'.format(kwargs['node'])) return ret def catalog_datacenters(consul_url=None, token=None): ''' Return list of available datacenters from catalog. :param consul_url: The Consul server URL. :return: The list of available datacenters. CLI Example: .. code-block:: bash salt '*' consul.catalog_datacenters ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'catalog/datacenters' ret = _query(consul_url=consul_url, function=function, token=token) return ret def catalog_nodes(consul_url=None, token=None, **kwargs): ''' Return list of available nodes from catalog. :param consul_url: The Consul server URL. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: The list of available nodes. CLI Example: .. 
code-block:: bash salt '*' consul.catalog_nodes ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'catalog/nodes' ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def catalog_services(consul_url=None, token=None, **kwargs): ''' Return list of available services rom catalog. :param consul_url: The Consul server URL. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: The list of available services. CLI Example: .. code-block:: bash salt '*' consul.catalog_services ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'catalog/services' ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def catalog_service(consul_url=None, token=None, service=None, **kwargs): ''' Information about the registered service. :param consul_url: The Consul server URL. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param tag: Filter returned services with tag parameter. :return: Information about the requested service. CLI Example: .. code-block:: bash salt '*' consul.catalog_service service='redis' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if not service: raise SaltInvocationError('Required argument "service" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] if 'tag' in kwargs: query_params['tag'] = kwargs['tag'] function = 'catalog/service/{0}'.format(service) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def catalog_node(consul_url=None, token=None, node=None, **kwargs): ''' Information about the registered node. :param consul_url: The Consul server URL. :param node: The node to request information about. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: Information about the requested node. CLI Example: .. code-block:: bash salt '*' consul.catalog_service service='redis' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not node: raise SaltInvocationError('Required argument "node" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'catalog/node/{0}'.format(node) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def health_node(consul_url=None, token=None, node=None, **kwargs): ''' Health information about the registered node. :param consul_url: The Consul server URL. :param node: The node to request health information about. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: Health information about the requested node. CLI Example: .. code-block:: bash salt '*' consul.health_node node='node1' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if not node: raise SaltInvocationError('Required argument "node" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'health/node/{0}'.format(node) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def health_checks(consul_url=None, token=None, service=None, **kwargs): ''' Health information about the registered service. :param consul_url: The Consul server URL. :param service: The service to request health information about. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: Health information about the requested node. CLI Example: .. code-block:: bash salt '*' consul.health_checks service='redis1' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not service: raise SaltInvocationError('Required argument "service" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'health/checks/{0}'.format(service) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def health_service(consul_url=None, token=None, service=None, **kwargs): ''' Health information about the registered service. :param consul_url: The Consul server URL. :param service: The service to request health information about. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param tag: Filter returned services with tag parameter. :param passing: Filter results to only nodes with all checks in the passing state. :return: Health information about the requested node. CLI Example: .. 
code-block:: bash salt '*' consul.health_service service='redis1' salt '*' consul.health_service service='redis1' passing='True' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not service: raise SaltInvocationError('Required argument "service" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] if 'tag' in kwargs: query_params['tag'] = kwargs['tag'] if 'passing' in kwargs: query_params['passing'] = kwargs['passing'] function = 'health/service/{0}'.format(service) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def health_state(consul_url=None, token=None, state=None, **kwargs): ''' Returns the checks in the state provided on the path. :param consul_url: The Consul server URL. :param state: The state to show checks for. The supported states are any, unknown, passing, warning, or critical. The any state is a wildcard that can be used to return all checks. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: The checks in the provided state. CLI Example: .. code-block:: bash salt '*' consul.health_state state='redis1' salt '*' consul.health_state service='redis1' passing='True' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not state: raise SaltInvocationError('Required argument "state" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] if state not in ('any', 'unknown', 'passing', 'warning', 'critical'): ret['message'] = 'State must be any, unknown, passing, warning, or critical.' 
ret['res'] = False return ret function = 'health/state/{0}'.format(state) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def status_leader(consul_url=None, token=None): ''' Returns the current Raft leader :param consul_url: The Consul server URL. :return: The address of the Raft leader. CLI Example: .. code-block:: bash salt '*' consul.status_leader ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'status/leader' ret = _query(consul_url=consul_url, function=function, token=token) return ret def status_peers(consul_url, token=None): ''' Returns the current Raft peer set :param consul_url: The Consul server URL. :return: Retrieves the Raft peers for the datacenter in which the agent is running. CLI Example: .. code-block:: bash salt '*' consul.status_peers ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'status/peers' ret = _query(consul_url=consul_url, function=function, token=token) return ret def acl_create(consul_url=None, token=None, **kwargs): ''' Create a new ACL token. :param consul_url: The Consul server URL. :param name: Meaningful indicator of the ACL's purpose. :param type: Type is either client or management. A management token is comparable to a root user and has the ability to perform any action including creating, modifying, and deleting ACLs. :param rules: The Consul server URL. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.acl_create ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if 'name' in kwargs: data['Name'] = kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') if 'type' in kwargs: data['Type'] = kwargs['type'] if 'rules' in kwargs: data['Rules'] = kwargs['rules'] function = 'acl/create' res = _query(consul_url=consul_url, token=token, data=data, method='PUT', function=function) if res['res']: ret['res'] = True ret['message'] = 'ACL {0} created.'.format(kwargs['name']) else: ret['res'] = False ret['message'] = ('Removing Catalog ' 'item {0} failed.'.format(kwargs['name'])) return ret def acl_update(consul_url=None, token=None, **kwargs): ''' Update an ACL token. :param consul_url: The Consul server URL. :param name: Meaningful indicator of the ACL's purpose. :param id: Unique identifier for the ACL to update. :param type: Type is either client or management. A management token is comparable to a root user and has the ability to perform any action including creating, modifying, and deleting ACLs. :param rules: The Consul server URL. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.acl_update ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'id' in kwargs: data['ID'] = kwargs['id'] else: ret['message'] = 'Required parameter "id" is missing.' 
ret['res'] = False return ret if 'name' in kwargs: data['Name'] = kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') if 'type' in kwargs: data['Type'] = kwargs['type'] if 'rules' in kwargs: data['Rules'] = kwargs['rules'] function = 'acl/update' res = _query(consul_url=consul_url, token=token, data=data, method='PUT', function=function) if res['res']: ret['res'] = True ret['message'] = 'ACL {0} created.'.format(kwargs['name']) else: ret['res'] = False ret['message'] = ('Adding ACL ' '{0} failed.'.format(kwargs['name'])) return ret def acl_delete(consul_url=None, token=None, **kwargs): ''' Delete an ACL token. :param consul_url: The Consul server URL. :param id: Unique identifier for the ACL to update. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.acl_delete id='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'id' not in kwargs: ret['message'] = 'Required parameter "id" is missing.' ret['res'] = False return ret function = 'acl/delete/{0}'.format(kwargs['id']) res = _query(consul_url=consul_url, token=token, data=data, method='PUT', function=function) if res['res']: ret['res'] = True ret['message'] = 'ACL {0} deleted.'.format(kwargs['id']) else: ret['res'] = False ret['message'] = ('Removing ACL ' '{0} failed.'.format(kwargs['id'])) return ret def acl_info(consul_url=None, **kwargs): ''' Information about an ACL token. :param consul_url: The Consul server URL. :param id: Unique identifier for the ACL to update. :return: Information about the ACL requested. CLI Example: .. 
code-block:: bash salt '*' consul.acl_info id='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'id' not in kwargs: ret['message'] = 'Required parameter "id" is missing.' ret['res'] = False return ret function = 'acl/info/{0}'.format(kwargs['id']) ret = _query(consul_url=consul_url, data=data, method='GET', function=function) return ret def acl_clone(consul_url=None, token=None, **kwargs): ''' Information about an ACL token. :param consul_url: The Consul server URL. :param id: Unique identifier for the ACL to update. :return: Boolean, message of success or failure, and new ID of cloned ACL. CLI Example: .. code-block:: bash salt '*' consul.acl_info id='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'id' not in kwargs: ret['message'] = 'Required parameter "id" is missing.' ret['res'] = False return ret function = 'acl/clone/{0}'.format(kwargs['id']) res = _query(consul_url=consul_url, token=token, data=data, method='PUT', function=function) if res['res']: ret['res'] = True ret['message'] = 'ACL {0} cloned.'.format(kwargs['name']) ret['ID'] = ret['data'] else: ret['res'] = False ret['message'] = ('Cloning ACL' 'item {0} failed.'.format(kwargs['name'])) return ret def acl_list(consul_url=None, token=None, **kwargs): ''' List the ACL tokens. :param consul_url: The Consul server URL. :return: List of ACLs CLI Example: .. code-block:: bash salt '*' consul.acl_list ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if 'id' not in kwargs: ret['message'] = 'Required parameter "id" is missing.' ret['res'] = False return ret function = 'acl/list' ret = _query(consul_url=consul_url, token=token, data=data, method='PUT', function=function) return ret def event_fire(consul_url=None, token=None, name=None, **kwargs): ''' List the ACL tokens. :param consul_url: The Consul server URL. :param name: The name of the event to fire. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param node: Filter by node name. :param service: Filter by service name. :param tag: Filter by tag name. :return: List of ACLs CLI Example: .. code-block:: bash salt '*' consul.event_fire name='deploy' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not name: raise SaltInvocationError('Required argument "name" is missing.') if 'dc' in kwargs: query_params = kwargs['dc'] if 'node' in kwargs: query_params = kwargs['node'] if 'service' in kwargs: query_params = kwargs['service'] if 'tag' in kwargs: query_params = kwargs['tag'] function = 'event/fire/{0}'.format(name) res = _query(consul_url=consul_url, token=token, query_params=query_params, method='PUT', function=function) if res['res']: ret['res'] = True ret['message'] = 'Event {0} fired.'.format(name) ret['data'] = ret['data'] else: ret['res'] = False ret['message'] = ('Cloning ACL' 'item {0} failed.'.format(kwargs['name'])) return ret def event_list(consul_url=None, token=None, **kwargs): ''' List the recent events. :param consul_url: The Consul server URL. :param name: The name of the event to fire. :return: List of ACLs CLI Example: .. 
code-block:: bash salt '*' consul.event_list ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'name' in kwargs: query_params = kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') function = 'event/list/' ret = _query(consul_url=consul_url, token=token, query_params=query_params, function=function) return ret
saltstack/salt
salt/modules/consul.py
delete
python
def delete(consul_url=None, token=None, key=None, **kwargs): ''' Delete values from Consul :param consul_url: The Consul server URL. :param key: The key to use as the starting point for the list. :param recurse: Delete values recursively beginning at the value of key. :param cas: This flag is used to turn the DELETE into a Check-And-Set operation. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.delete key='web' salt '*' consul.delete key='web' recurse='True' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not key: raise SaltInvocationError('Required argument "key" is missing.') query_params = {} if 'recurse' in kwargs: query_params['recurse'] = True if 'cas' in kwargs: if kwargs['cas'] > 0: query_params['cas'] = kwargs['cas'] else: ret['message'] = ('Check and Set Operation ', 'value must be greater than 0.') ret['res'] = False return ret function = 'kv/{0}'.format(key) ret = _query(consul_url=consul_url, token=token, function=function, method='DELETE', query_params=query_params) if ret['res']: ret['res'] = True ret['message'] = 'Deleted key {0}.'.format(key) else: ret['res'] = False ret['message'] = 'Unable to delete key {0}.'.format(key) return ret
Delete values from Consul :param consul_url: The Consul server URL. :param key: The key to use as the starting point for the list. :param recurse: Delete values recursively beginning at the value of key. :param cas: This flag is used to turn the DELETE into a Check-And-Set operation. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.delete key='web' salt '*' consul.delete key='web' recurse='True'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/consul.py#L348-L406
[ "def _query(function,\n consul_url,\n token=None,\n method='GET',\n api_version='v1',\n data=None,\n query_params=None):\n '''\n Consul object method function to construct and execute on the API URL.\n\n :param api_url: The Consul api url.\n :param api_version The Consul api version\n :param function: The Consul api function to perform.\n :param method: The HTTP method, e.g. GET or POST.\n :param data: The data to be sent for POST method. This param is ignored for GET requests.\n :return: The json response from the API call or False.\n '''\n\n if not query_params:\n query_params = {}\n\n ret = {'data': '',\n 'res': True}\n\n if not token:\n token = _get_token()\n\n headers = {\"X-Consul-Token\": token, \"Content-Type\": \"application/json\"}\n base_url = urllib.parse.urljoin(consul_url, '{0}/'.format(api_version))\n url = urllib.parse.urljoin(base_url, function, False)\n\n if method == 'GET':\n data = None\n else:\n if data is None:\n data = {}\n data = salt.utils.json.dumps(data)\n\n result = salt.utils.http.query(\n url,\n method=method,\n params=query_params,\n data=data,\n decode=True,\n status=True,\n header_dict=headers,\n opts=__opts__,\n )\n\n if result.get('status', None) == http_client.OK:\n ret['data'] = result.get('dict', result)\n ret['res'] = True\n elif result.get('status', None) == http_client.NO_CONTENT:\n ret['res'] = False\n elif result.get('status', None) == http_client.NOT_FOUND:\n ret['data'] = 'Key not found.'\n ret['res'] = False\n else:\n if result:\n ret['data'] = result\n ret['res'] = True\n else:\n ret['res'] = False\n return ret\n", "def _get_config():\n '''\n Retrieve Consul configuration\n '''\n return __salt__['config.get']('consul.url') or \\\n __salt__['config.get']('consul:url')\n" ]
# -*- coding: utf-8 -*- ''' Interact with Consul https://www.consul.io ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import base64 import logging # Import salt libs import salt.utils.http import salt.utils.json # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import http_client, urllib log = logging.getLogger(__name__) from salt.exceptions import SaltInvocationError # Don't shadow built-ins. __func_alias__ = { 'list_': 'list' } __virtualname__ = 'consul' def _get_config(): ''' Retrieve Consul configuration ''' return __salt__['config.get']('consul.url') or \ __salt__['config.get']('consul:url') def _get_token(): ''' Retrieve Consul configuration ''' return __salt__['config.get']('consul.token') or \ __salt__['config.get']('consul:token') def _query(function, consul_url, token=None, method='GET', api_version='v1', data=None, query_params=None): ''' Consul object method function to construct and execute on the API URL. :param api_url: The Consul api url. :param api_version The Consul api version :param function: The Consul api function to perform. :param method: The HTTP method, e.g. GET or POST. :param data: The data to be sent for POST method. This param is ignored for GET requests. :return: The json response from the API call or False. 
''' if not query_params: query_params = {} ret = {'data': '', 'res': True} if not token: token = _get_token() headers = {"X-Consul-Token": token, "Content-Type": "application/json"} base_url = urllib.parse.urljoin(consul_url, '{0}/'.format(api_version)) url = urllib.parse.urljoin(base_url, function, False) if method == 'GET': data = None else: if data is None: data = {} data = salt.utils.json.dumps(data) result = salt.utils.http.query( url, method=method, params=query_params, data=data, decode=True, status=True, header_dict=headers, opts=__opts__, ) if result.get('status', None) == http_client.OK: ret['data'] = result.get('dict', result) ret['res'] = True elif result.get('status', None) == http_client.NO_CONTENT: ret['res'] = False elif result.get('status', None) == http_client.NOT_FOUND: ret['data'] = 'Key not found.' ret['res'] = False else: if result: ret['data'] = result ret['res'] = True else: ret['res'] = False return ret def list_(consul_url=None, token=None, key=None, **kwargs): ''' List keys in Consul :param consul_url: The Consul server URL. :param key: The key to use as the starting point for the list. :return: The list of keys. CLI Example: .. code-block:: bash salt '*' consul.list salt '*' consul.list key='web' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret query_params = {} if 'recurse' in kwargs: query_params['recurse'] = 'True' # No key so recurse and show all values if not key: query_params['recurse'] = 'True' function = 'kv/' else: function = 'kv/{0}'.format(key) query_params['keys'] = 'True' query_params['separator'] = '/' ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def get(consul_url=None, key=None, token=None, recurse=False, decode=False, raw=False): ''' Get key from Consul :param consul_url: The Consul server URL. 
:param key: The key to use as the starting point for the list. :param recurse: Return values recursively beginning at the value of key. :param decode: By default values are stored as Base64 encoded values, decode will return the whole key with the value decoded. :param raw: Simply return the decoded value of the key. :return: The keys in Consul. CLI Example: .. code-block:: bash salt '*' consul.get key='web/key1' salt '*' consul.get key='web' recurse=True salt '*' consul.get key='web' recurse=True decode=True By default values stored in Consul are base64 encoded, passing the decode option will show them as the decoded values. .. code-block:: bash salt '*' consul.get key='web' recurse=True decode=True raw=True By default Consult will return other information about the key, the raw option will return only the raw value. ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not key: raise SaltInvocationError('Required argument "key" is missing.') query_params = {} function = 'kv/{0}'.format(key) if recurse: query_params['recurse'] = 'True' if raw: query_params['raw'] = True ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) if ret['res']: if decode: for item in ret['data']: if item['Value'] is None: item['Value'] = "" else: item['Value'] = base64.b64decode(item['Value']) return ret def put(consul_url=None, token=None, key=None, value=None, **kwargs): ''' Put values into Consul :param consul_url: The Consul server URL. :param key: The key to use as the starting point for the list. :param value: The value to set the key to. :param flags: This can be used to specify an unsigned value between 0 and 2^64-1. Clients can choose to use this however makes sense for their application. :param cas: This flag is used to turn the PUT into a Check-And-Set operation. 
:param acquire: This flag is used to turn the PUT into a lock acquisition operation. :param release: This flag is used to turn the PUT into a lock release operation. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.put key='web/key1' value="Hello there" salt '*' consul.put key='web/key1' value="Hello there" acquire='d5d371f4-c380-5280-12fd-8810be175592' salt '*' consul.put key='web/key1' value="Hello there" release='d5d371f4-c380-5280-12fd-8810be175592' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not key: raise SaltInvocationError('Required argument "key" is missing.') # Invalid to specified these together conflicting_args = ['cas', 'release', 'acquire'] for _l1 in conflicting_args: for _l2 in conflicting_args: if _l1 in kwargs and _l2 in kwargs and _l1 != _l2: raise SaltInvocationError('Using arguments `{0}` and `{1}`' ' together is invalid.'.format(_l1, _l2)) query_params = {} available_sessions = session_list(consul_url=consul_url, return_list=True) _current = get(consul_url=consul_url, key=key) if 'flags' in kwargs: if kwargs['flags'] >= 0 and kwargs['flags'] <= 2**64: query_params['flags'] = kwargs['flags'] if 'cas' in kwargs: if _current['res']: if kwargs['cas'] == 0: ret['message'] = ('Key {0} exists, index ' 'must be non-zero.'.format(key)) ret['res'] = False return ret if kwargs['cas'] != _current['data']['ModifyIndex']: ret['message'] = ('Key {0} exists, but indexes ' 'do not match.'.format(key)) ret['res'] = False return ret query_params['cas'] = kwargs['cas'] else: ret['message'] = ('Key {0} does not exists, ' 'CAS argument can not be used.'.format(key)) ret['res'] = False return ret if 'acquire' in kwargs: if kwargs['acquire'] not in available_sessions: ret['message'] = '{0} is not a valid session.'.format(kwargs['acquire']) ret['res'] = False return ret 
query_params['acquire'] = kwargs['acquire'] if 'release' in kwargs: if _current['res']: if 'Session' in _current['data']: if _current['data']['Session'] == kwargs['release']: query_params['release'] = kwargs['release'] else: ret['message'] = '{0} locked by another session.'.format(key) ret['res'] = False return ret else: ret['message'] = '{0} is not a valid session.'.format(kwargs['acquire']) ret['res'] = False else: log.error('Key {0} does not exist. Skipping release.') data = value function = 'kv/{0}'.format(key) method = 'PUT' ret = _query(consul_url=consul_url, token=token, function=function, method=method, data=data, query_params=query_params) if ret['res']: ret['res'] = True ret['data'] = 'Added key {0} with value {1}.'.format(key, value) else: ret['res'] = False ret['data'] = 'Unable to add key {0} with value {1}.'.format(key, value) return ret def agent_checks(consul_url=None, token=None): ''' Returns the checks the local agent is managing :param consul_url: The Consul server URL. :return: Returns the checks the local agent is managing CLI Example: .. code-block:: bash salt '*' consul.agent_checks ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'agent/checks' ret = _query(consul_url=consul_url, function=function, token=token, method='GET') return ret def agent_services(consul_url=None, token=None): ''' Returns the services the local agent is managing :param consul_url: The Consul server URL. :return: Returns the services the local agent is managing CLI Example: .. code-block:: bash salt '*' consul.agent_services ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret function = 'agent/services' ret = _query(consul_url=consul_url, function=function, token=token, method='GET') return ret def agent_members(consul_url=None, token=None, **kwargs): ''' Returns the members as seen by the local serf agent :param consul_url: The Consul server URL. :return: Returns the members as seen by the local serf agent CLI Example: .. code-block:: bash salt '*' consul.agent_members ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'wan' in kwargs: query_params['wan'] = kwargs['wan'] function = 'agent/members' ret = _query(consul_url=consul_url, function=function, token=token, method='GET', query_params=query_params) return ret def agent_self(consul_url=None, token=None): ''' Returns the local node configuration :param consul_url: The Consul server URL. :return: Returns the local node configuration CLI Example: .. code-block:: bash salt '*' consul.agent_self ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'agent/self' ret = _query(consul_url=consul_url, function=function, token=token, method='GET', query_params=query_params) return ret def agent_maintenance(consul_url=None, token=None, **kwargs): ''' Manages node maintenance mode :param consul_url: The Consul server URL. :param enable: The enable flag is required. Acceptable values are either true (to enter maintenance mode) or false (to resume normal operation). :param reason: If provided, its value should be a text string explaining the reason for placing the node into maintenance mode. :return: Boolean and message indicating success or failure. CLI Example: .. 
code-block:: bash salt '*' consul.agent_maintenance enable='False' reason='Upgrade in progress' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'enable' in kwargs: query_params['enable'] = kwargs['enable'] else: ret['message'] = 'Required parameter "enable" is missing.' ret['res'] = False return ret if 'reason' in kwargs: query_params['reason'] = kwargs['reason'] function = 'agent/maintenance' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', query_params=query_params) if res['res']: ret['res'] = True ret['message'] = ('Agent maintenance mode ' '{0}ed.'.format(kwargs['enable'])) else: ret['res'] = True ret['message'] = 'Unable to change maintenance mode for agent.' return ret def agent_join(consul_url=None, token=None, address=None, **kwargs): ''' Triggers the local agent to join a node :param consul_url: The Consul server URL. :param address: The address for the agent to connect to. :param wan: Causes the agent to attempt to join using the WAN pool. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_join address='192.168.1.1' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not address: raise SaltInvocationError('Required argument "address" is missing.') if 'wan' in kwargs: query_params['wan'] = kwargs['wan'] function = 'agent/join/{0}'.format(address) res = _query(consul_url=consul_url, function=function, token=token, method='GET', query_params=query_params) if res['res']: ret['res'] = True ret['message'] = 'Agent joined the cluster' else: ret['res'] = False ret['message'] = 'Unable to join the cluster.' 
return ret def agent_leave(consul_url=None, token=None, node=None): ''' Used to instruct the agent to force a node into the left state. :param consul_url: The Consul server URL. :param node: The node the agent will force into left state :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_leave node='web1.example.com' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not node: raise SaltInvocationError('Required argument "node" is missing.') function = 'agent/force-leave/{0}'.format(node) res = _query(consul_url=consul_url, function=function, token=token, method='GET', query_params=query_params) if res['res']: ret['res'] = True ret['message'] = 'Node {0} put in leave state.'.format(node) else: ret['res'] = False ret['message'] = 'Unable to change state for {0}.'.format(node) return ret def agent_check_register(consul_url=None, token=None, **kwargs): ''' The register endpoint is used to add a new check to the local agent. :param consul_url: The Consul server URL. :param name: The description of what the check is for. :param id: The unique name to use for the check, if not provided 'name' is used. :param notes: Human readable description of the check. :param script: If script is provided, the check type is a script, and Consul will evaluate that script based on the interval parameter. :param http: Check will perform an HTTP GET request against the value of HTTP (expected to be a URL) based on the interval parameter. :param ttl: If a TTL type is used, then the TTL update endpoint must be used periodically to update the state of the check. :param interval: Interval at which the check should run. :return: Boolean and message indicating success or failure. CLI Example: .. 
code-block:: bash salt '*' consul.agent_check_register name='Memory Utilization' script='/usr/local/bin/check_mem.py' interval='15s' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'name' in kwargs: data['Name'] = kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') if True not in [True for item in ('script', 'http', 'ttl') if item in kwargs]: ret['message'] = 'Required parameter "script" or "http" is missing.' ret['res'] = False return ret if 'id' in kwargs: data['ID'] = kwargs['id'] if 'notes' in kwargs: data['Notes'] = kwargs['notes'] if 'script' in kwargs: if 'interval' not in kwargs: ret['message'] = 'Required parameter "interval" is missing.' ret['res'] = False return ret data['Script'] = kwargs['script'] data['Interval'] = kwargs['interval'] if 'http' in kwargs: if 'interval' not in kwargs: ret['message'] = 'Required parameter "interval" is missing.' ret['res'] = False return ret data['HTTP'] = kwargs['http'] data['Interval'] = kwargs['interval'] if 'ttl' in kwargs: data['TTL'] = kwargs['ttl'] function = 'agent/check/register' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = ('Check {0} added to agent.'.format(kwargs['name'])) else: ret['res'] = False ret['message'] = 'Unable to add check to agent.' return ret def agent_check_deregister(consul_url=None, token=None, checkid=None): ''' The agent will take care of deregistering the check from the Catalog. :param consul_url: The Consul server URL. :param checkid: The ID of the check to deregister from Consul. :return: Boolean and message indicating success or failure. CLI Example: .. 
code-block:: bash salt '*' consul.agent_check_deregister checkid='Memory Utilization' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not checkid: raise SaltInvocationError('Required argument "checkid" is missing.') function = 'agent/check/deregister/{0}'.format(checkid) res = _query(consul_url=consul_url, function=function, token=token, method='GET') if res['res']: ret['res'] = True ret['message'] = ('Check {0} removed from agent.'.format(checkid)) else: ret['res'] = False ret['message'] = 'Unable to remove check from agent.' return ret def agent_check_pass(consul_url=None, token=None, checkid=None, **kwargs): ''' This endpoint is used with a check that is of the TTL type. When this is called, the status of the check is set to passing and the TTL clock is reset. :param consul_url: The Consul server URL. :param checkid: The ID of the check to mark as passing. :param note: A human-readable message with the status of the check. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_check_pass checkid='redis_check1' note='Forcing check into passing state.' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if not checkid: raise SaltInvocationError('Required argument "checkid" is missing.') if 'note' in kwargs: query_params['note'] = kwargs['note'] function = 'agent/check/pass/{0}'.format(checkid) res = _query(consul_url=consul_url, function=function, token=token, query_params=query_params, method='GET') if res['res']: ret['res'] = True ret['message'] = 'Check {0} marked as passing.'.format(checkid) else: ret['res'] = False ret['message'] = 'Unable to update check {0}.'.format(checkid) return ret def agent_check_warn(consul_url=None, token=None, checkid=None, **kwargs): ''' This endpoint is used with a check that is of the TTL type. When this is called, the status of the check is set to warning and the TTL clock is reset. :param consul_url: The Consul server URL. :param checkid: The ID of the check to deregister from Consul. :param note: A human-readable message with the status of the check. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_check_warn checkid='redis_check1' note='Forcing check into warning state.' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not checkid: raise SaltInvocationError('Required argument "checkid" is missing.') if 'note' in kwargs: query_params['note'] = kwargs['note'] function = 'agent/check/warn/{0}'.format(checkid) res = _query(consul_url=consul_url, function=function, token=token, query_params=query_params, method='GET') if res['res']: ret['res'] = True ret['message'] = 'Check {0} marked as warning.'.format(checkid) else: ret['res'] = False ret['message'] = 'Unable to update check {0}.'.format(checkid) return ret def agent_check_fail(consul_url=None, token=None, checkid=None, **kwargs): ''' This endpoint is used with a check that is of the TTL type. 
When this is called, the status of the check is set to critical and the TTL clock is reset. :param consul_url: The Consul server URL. :param checkid: The ID of the check to deregister from Consul. :param note: A human-readable message with the status of the check. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_check_fail checkid='redis_check1' note='Forcing check into critical state.' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not checkid: raise SaltInvocationError('Required argument "checkid" is missing.') if 'note' in kwargs: query_params['note'] = kwargs['note'] function = 'agent/check/fail/{0}'.format(checkid) res = _query(consul_url=consul_url, function=function, token=token, query_params=query_params, method='GET') if res['res']: ret['res'] = True ret['message'] = 'Check {0} marked as critical.'.format(checkid) else: ret['res'] = False ret['message'] = 'Unable to update check {0}.'.format(checkid) return ret def agent_service_register(consul_url=None, token=None, **kwargs): ''' The used to add a new service, with an optional health check, to the local agent. :param consul_url: The Consul server URL. :param name: A name describing the service. :param address: The address used by the service, defaults to the address of the agent. :param port: The port used by the service. :param id: Unique ID to identify the service, if not provided the value of the name parameter is used. :param tags: Identifying tags for service, string or list. :param script: If script is provided, the check type is a script, and Consul will evaluate that script based on the interval parameter. :param http: Check will perform an HTTP GET request against the value of HTTP (expected to be a URL) based on the interval parameter. 
:param check_ttl: If a TTL type is used, then the TTL update endpoint must be used periodically to update the state of the check. :param check_interval: Interval at which the check should run. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_service_register name='redis' tags='["master", "v1"]' address="127.0.0.1" port="8080" check_script="/usr/local/bin/check_redis.py" interval="10s" ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret lc_kwargs = dict() for k, v in six.iteritems(kwargs): lc_kwargs[k.lower()] = v if 'name' in lc_kwargs: data['Name'] = lc_kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') if 'address' in lc_kwargs: data['Address'] = lc_kwargs['address'] if 'port' in lc_kwargs: data['Port'] = lc_kwargs['port'] if 'id' in lc_kwargs: data['ID'] = lc_kwargs['id'] if 'tags' in lc_kwargs: _tags = lc_kwargs['tags'] if not isinstance(_tags, list): _tags = [_tags] data['Tags'] = _tags if 'enabletagoverride' in lc_kwargs: data['EnableTagOverride'] = lc_kwargs['enabletagoverride'] if 'check' in lc_kwargs: dd = dict() for k, v in six.iteritems(lc_kwargs['check']): dd[k.lower()] = v interval_required = False check_dd = dict() if 'script' in dd: interval_required = True check_dd['Script'] = dd['script'] if 'http' in dd: interval_required = True check_dd['HTTP'] = dd['http'] if 'ttl' in dd: check_dd['TTL'] = dd['ttl'] if 'interval' in dd: check_dd['Interval'] = dd['interval'] if interval_required: if 'Interval' not in check_dd: ret['message'] = 'Required parameter "interval" is missing.' 
ret['res'] = False return ret else: if 'Interval' in check_dd: del check_dd['Interval'] # not required, so ignore it if check_dd > 0: data['Check'] = check_dd # if empty, ignore it function = 'agent/service/register' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = 'Service {0} registered on agent.'.format(kwargs['name']) else: ret['res'] = False ret['message'] = 'Unable to register service {0}.'.format(kwargs['name']) return ret def agent_service_deregister(consul_url=None, token=None, serviceid=None): ''' Used to remove a service. :param consul_url: The Consul server URL. :param serviceid: A serviceid describing the service. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_service_deregister serviceid='redis' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not serviceid: raise SaltInvocationError('Required argument "serviceid" is missing.') function = 'agent/service/deregister/{0}'.format(serviceid) res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = 'Service {0} removed from agent.'.format(serviceid) else: ret['res'] = False ret['message'] = 'Unable to remove service {0}.'.format(serviceid) return ret def agent_service_maintenance(consul_url=None, token=None, serviceid=None, **kwargs): ''' Used to place a service into maintenance mode. :param consul_url: The Consul server URL. :param serviceid: A name of the service. :param enable: Whether the service should be enabled or disabled. :param reason: A human readable message of why the service was enabled or disabled. :return: Boolean and message indicating success or failure. CLI Example: .. 
code-block:: bash salt '*' consul.agent_service_deregister serviceid='redis' enable='True' reason='Down for upgrade' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not serviceid: raise SaltInvocationError('Required argument "serviceid" is missing.') if 'enable' in kwargs: query_params['enable'] = kwargs['enable'] else: ret['message'] = 'Required parameter "enable" is missing.' ret['res'] = False return ret if 'reason' in kwargs: query_params['reason'] = kwargs['reason'] function = 'agent/service/maintenance/{0}'.format(serviceid) res = _query(consul_url=consul_url, token=token, function=function, query_params=query_params) if res['res']: ret['res'] = True ret['message'] = ('Service {0} set in ' 'maintenance mode.'.format(serviceid)) else: ret['res'] = False ret['message'] = ('Unable to set service ' '{0} to maintenance mode.'.format(serviceid)) return ret def session_create(consul_url=None, token=None, **kwargs): ''' Used to create a session. :param consul_url: The Consul server URL. :param lockdelay: Duration string using a "s" suffix for seconds. The default is 15s. :param node: Must refer to a node that is already registered, if specified. By default, the agent's own node name is used. :param name: A human-readable name for the session :param checks: A list of associated health checks. It is highly recommended that, if you override this list, you include the default "serfHealth". :param behavior: Can be set to either release or delete. This controls the behavior when a session is invalidated. By default, this is release, causing any locks that are held to be released. Changing this to delete causes any locks that are held to be deleted. delete is useful for creating ephemeral key/value entries. 
:param ttl: Session is invalidated if it is not renewed before the TTL expires :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.session_create node='node1' name='my-session' behavior='delete' ttl='3600s' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret data = {} if 'lockdelay' in kwargs: data['LockDelay'] = kwargs['lockdelay'] if 'node' in kwargs: data['Node'] = kwargs['node'] if 'name' in kwargs: data['Name'] = kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') if 'checks' in kwargs: data['Touch'] = kwargs['touch'] if 'behavior' in kwargs: if not kwargs['behavior'] in ('delete', 'release'): ret['message'] = ('Behavior must be ', 'either delete or release.') ret['res'] = False return ret data['Behavior'] = kwargs['behavior'] if 'ttl' in kwargs: _ttl = kwargs['ttl'] if six.text_type(_ttl).endswith('s'): _ttl = _ttl[:-1] if int(_ttl) < 0 or int(_ttl) > 3600: ret['message'] = ('TTL must be ', 'between 0 and 3600.') ret['res'] = False return ret data['TTL'] = '{0}s'.format(_ttl) function = 'session/create' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = 'Created session {0}.'.format(kwargs['name']) else: ret['res'] = False ret['message'] = 'Unable to create session {0}.'.format(kwargs['name']) return ret def session_list(consul_url=None, token=None, return_list=False, **kwargs): ''' Used to list sessions. :param consul_url: The Consul server URL. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param return_list: By default, all information about the sessions is returned, using the return_list parameter will return a list of session IDs. 
:return: A list of all available sessions. CLI Example: .. code-block:: bash salt '*' consul.session_list ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret query_params = {} if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'session/list' ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) if return_list: _list = [] for item in ret['data']: _list.append(item['ID']) return _list return ret def session_destroy(consul_url=None, token=None, session=None, **kwargs): ''' Destroy session :param consul_url: The Consul server URL. :param session: The ID of the session to destroy. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.session_destroy session='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not session: raise SaltInvocationError('Required argument "session" is missing.') query_params = {} if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'session/destroy/{0}'.format(session) res = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) if res['res']: ret['res'] = True ret['message'] = 'Created Service {0}.'.format(kwargs['name']) else: ret['res'] = False ret['message'] = 'Unable to create service {0}.'.format(kwargs['name']) return ret def session_info(consul_url=None, token=None, session=None, **kwargs): ''' Information about a session :param consul_url: The Consul server URL. :param session: The ID of the session to return information about. 
:param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.session_info session='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not session: raise SaltInvocationError('Required argument "session" is missing.') query_params = {} if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'session/info/{0}'.format(session) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def catalog_register(consul_url=None, token=None, **kwargs): ''' Registers a new node, service, or check :param consul_url: The Consul server URL. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param node: The node to register. :param address: The address of the node. :param service: The service that will be registered. :param service_address: The address that the service listens on. :param service_port: The port for the service. :param service_id: A unique identifier for the service, if this is not provided "name" will be used. :param service_tags: Any tags associated with the service. :param check: The name of the health check to register :param check_status: The initial status of the check, must be one of unknown, passing, warning, or critical. :param check_service: The service that the check is performed against. :param check_id: Unique identifier for the service. :param check_notes: An opaque field that is meant to hold human-readable text. :return: Boolean & message of success or failure. CLI Example: .. 
code-block:: bash salt '*' consul.catalog_register node='node1' address='192.168.1.1' service='redis' service_address='127.0.0.1' service_port='8080' service_id='redis_server1' ''' ret = {} data = {} data['NodeMeta'] = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'datacenter' in kwargs: data['Datacenter'] = kwargs['datacenter'] if 'node' in kwargs: data['Node'] = kwargs['node'] else: ret['message'] = 'Required argument node argument is missing.' ret['res'] = False return ret if 'address' in kwargs: if isinstance(kwargs['address'], list): _address = kwargs['address'][0] else: _address = kwargs['address'] data['Address'] = _address else: ret['message'] = 'Required argument address argument is missing.' ret['res'] = False return ret if 'ip_interfaces' in kwargs: data['TaggedAddresses'] = {} for k in kwargs['ip_interfaces']: if kwargs['ip_interfaces'].get(k): data['TaggedAddresses'][k] = kwargs['ip_interfaces'][k][0] if 'service' in kwargs: data['Service'] = {} data['Service']['Service'] = kwargs['service'] if 'service_address' in kwargs: data['Service']['Address'] = kwargs['service_address'] if 'service_port' in kwargs: data['Service']['Port'] = kwargs['service_port'] if 'service_id' in kwargs: data['Service']['ID'] = kwargs['service_id'] if 'service_tags' in kwargs: _tags = kwargs['service_tags'] if not isinstance(_tags, list): _tags = [_tags] data['Service']['Tags'] = _tags if 'cpu' in kwargs: data['NodeMeta']['Cpu'] = kwargs['cpu'] if 'num_cpus' in kwargs: data['NodeMeta']['Cpu_num'] = kwargs['num_cpus'] if 'mem' in kwargs: data['NodeMeta']['Memory'] = kwargs['mem'] if 'oscode' in kwargs: data['NodeMeta']['Os'] = kwargs['oscode'] if 'osarch' in kwargs: data['NodeMeta']['Osarch'] = kwargs['osarch'] if 'kernel' in kwargs: data['NodeMeta']['Kernel'] = kwargs['kernel'] if 'kernelrelease' in kwargs: data['NodeMeta']['Kernelrelease'] = 
kwargs['kernelrelease'] if 'localhost' in kwargs: data['NodeMeta']['localhost'] = kwargs['localhost'] if 'nodename' in kwargs: data['NodeMeta']['nodename'] = kwargs['nodename'] if 'os_family' in kwargs: data['NodeMeta']['os_family'] = kwargs['os_family'] if 'lsb_distrib_description' in kwargs: data['NodeMeta']['lsb_distrib_description'] = kwargs['lsb_distrib_description'] if 'master' in kwargs: data['NodeMeta']['master'] = kwargs['master'] if 'check' in kwargs: data['Check'] = {} data['Check']['Name'] = kwargs['check'] if 'check_status' in kwargs: if kwargs['check_status'] not in ('unknown', 'passing', 'warning', 'critical'): ret['message'] = 'Check status must be unknown, passing, warning, or critical.' ret['res'] = False return ret data['Check']['Status'] = kwargs['check_status'] if 'check_service' in kwargs: data['Check']['ServiceID'] = kwargs['check_service'] if 'check_id' in kwargs: data['Check']['CheckID'] = kwargs['check_id'] if 'check_notes' in kwargs: data['Check']['Notes'] = kwargs['check_notes'] function = 'catalog/register' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = ('Catalog registration ' 'for {0} successful.'.format(kwargs['node'])) else: ret['res'] = False ret['message'] = ('Catalog registration ' 'for {0} failed.'.format(kwargs['node'])) ret['data'] = data return ret def catalog_deregister(consul_url=None, token=None, **kwargs): ''' Deregisters a node, service, or check :param consul_url: The Consul server URL. :param node: The node to deregister. :param datacenter: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param checkid: The ID of the health check to deregister. :param serviceid: The ID of the service to deregister. :return: Boolean & message of success or failure. CLI Example: .. 
code-block:: bash salt '*' consul.catalog_register node='node1' serviceid='redis_server1' checkid='redis_check1' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'datacenter' in kwargs: data['Datacenter'] = kwargs['datacenter'] if 'node' in kwargs: data['Node'] = kwargs['node'] else: ret['message'] = 'Node argument required.' ret['res'] = False return ret if 'checkid' in kwargs: data['CheckID'] = kwargs['checkid'] if 'serviceid' in kwargs: data['ServiceID'] = kwargs['serviceid'] function = 'catalog/deregister' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = 'Catalog item {0} removed.'.format(kwargs['node']) else: ret['res'] = False ret['message'] = ('Removing Catalog ' 'item {0} failed.'.format(kwargs['node'])) return ret def catalog_datacenters(consul_url=None, token=None): ''' Return list of available datacenters from catalog. :param consul_url: The Consul server URL. :return: The list of available datacenters. CLI Example: .. code-block:: bash salt '*' consul.catalog_datacenters ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'catalog/datacenters' ret = _query(consul_url=consul_url, function=function, token=token) return ret def catalog_nodes(consul_url=None, token=None, **kwargs): ''' Return list of available nodes from catalog. :param consul_url: The Consul server URL. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: The list of available nodes. CLI Example: .. 
code-block:: bash salt '*' consul.catalog_nodes ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'catalog/nodes' ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def catalog_services(consul_url=None, token=None, **kwargs): ''' Return list of available services rom catalog. :param consul_url: The Consul server URL. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: The list of available services. CLI Example: .. code-block:: bash salt '*' consul.catalog_services ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'catalog/services' ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def catalog_service(consul_url=None, token=None, service=None, **kwargs): ''' Information about the registered service. :param consul_url: The Consul server URL. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param tag: Filter returned services with tag parameter. :return: Information about the requested service. CLI Example: .. code-block:: bash salt '*' consul.catalog_service service='redis' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if not service: raise SaltInvocationError('Required argument "service" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] if 'tag' in kwargs: query_params['tag'] = kwargs['tag'] function = 'catalog/service/{0}'.format(service) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def catalog_node(consul_url=None, token=None, node=None, **kwargs): ''' Information about the registered node. :param consul_url: The Consul server URL. :param node: The node to request information about. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: Information about the requested node. CLI Example: .. code-block:: bash salt '*' consul.catalog_service service='redis' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not node: raise SaltInvocationError('Required argument "node" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'catalog/node/{0}'.format(node) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def health_node(consul_url=None, token=None, node=None, **kwargs): ''' Health information about the registered node. :param consul_url: The Consul server URL. :param node: The node to request health information about. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: Health information about the requested node. CLI Example: .. code-block:: bash salt '*' consul.health_node node='node1' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if not node: raise SaltInvocationError('Required argument "node" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'health/node/{0}'.format(node) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def health_checks(consul_url=None, token=None, service=None, **kwargs): ''' Health information about the registered service. :param consul_url: The Consul server URL. :param service: The service to request health information about. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: Health information about the requested node. CLI Example: .. code-block:: bash salt '*' consul.health_checks service='redis1' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not service: raise SaltInvocationError('Required argument "service" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'health/checks/{0}'.format(service) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def health_service(consul_url=None, token=None, service=None, **kwargs): ''' Health information about the registered service. :param consul_url: The Consul server URL. :param service: The service to request health information about. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param tag: Filter returned services with tag parameter. :param passing: Filter results to only nodes with all checks in the passing state. :return: Health information about the requested node. CLI Example: .. 
code-block:: bash salt '*' consul.health_service service='redis1' salt '*' consul.health_service service='redis1' passing='True' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not service: raise SaltInvocationError('Required argument "service" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] if 'tag' in kwargs: query_params['tag'] = kwargs['tag'] if 'passing' in kwargs: query_params['passing'] = kwargs['passing'] function = 'health/service/{0}'.format(service) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def health_state(consul_url=None, token=None, state=None, **kwargs): ''' Returns the checks in the state provided on the path. :param consul_url: The Consul server URL. :param state: The state to show checks for. The supported states are any, unknown, passing, warning, or critical. The any state is a wildcard that can be used to return all checks. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: The checks in the provided state. CLI Example: .. code-block:: bash salt '*' consul.health_state state='redis1' salt '*' consul.health_state service='redis1' passing='True' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not state: raise SaltInvocationError('Required argument "state" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] if state not in ('any', 'unknown', 'passing', 'warning', 'critical'): ret['message'] = 'State must be any, unknown, passing, warning, or critical.' 
ret['res'] = False return ret function = 'health/state/{0}'.format(state) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def status_leader(consul_url=None, token=None): ''' Returns the current Raft leader :param consul_url: The Consul server URL. :return: The address of the Raft leader. CLI Example: .. code-block:: bash salt '*' consul.status_leader ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'status/leader' ret = _query(consul_url=consul_url, function=function, token=token) return ret def status_peers(consul_url, token=None): ''' Returns the current Raft peer set :param consul_url: The Consul server URL. :return: Retrieves the Raft peers for the datacenter in which the agent is running. CLI Example: .. code-block:: bash salt '*' consul.status_peers ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'status/peers' ret = _query(consul_url=consul_url, function=function, token=token) return ret def acl_create(consul_url=None, token=None, **kwargs): ''' Create a new ACL token. :param consul_url: The Consul server URL. :param name: Meaningful indicator of the ACL's purpose. :param type: Type is either client or management. A management token is comparable to a root user and has the ability to perform any action including creating, modifying, and deleting ACLs. :param rules: The Consul server URL. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.acl_create ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if 'name' in kwargs: data['Name'] = kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') if 'type' in kwargs: data['Type'] = kwargs['type'] if 'rules' in kwargs: data['Rules'] = kwargs['rules'] function = 'acl/create' res = _query(consul_url=consul_url, token=token, data=data, method='PUT', function=function) if res['res']: ret['res'] = True ret['message'] = 'ACL {0} created.'.format(kwargs['name']) else: ret['res'] = False ret['message'] = ('Removing Catalog ' 'item {0} failed.'.format(kwargs['name'])) return ret def acl_update(consul_url=None, token=None, **kwargs): ''' Update an ACL token. :param consul_url: The Consul server URL. :param name: Meaningful indicator of the ACL's purpose. :param id: Unique identifier for the ACL to update. :param type: Type is either client or management. A management token is comparable to a root user and has the ability to perform any action including creating, modifying, and deleting ACLs. :param rules: The Consul server URL. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.acl_update ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'id' in kwargs: data['ID'] = kwargs['id'] else: ret['message'] = 'Required parameter "id" is missing.' 
ret['res'] = False return ret if 'name' in kwargs: data['Name'] = kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') if 'type' in kwargs: data['Type'] = kwargs['type'] if 'rules' in kwargs: data['Rules'] = kwargs['rules'] function = 'acl/update' res = _query(consul_url=consul_url, token=token, data=data, method='PUT', function=function) if res['res']: ret['res'] = True ret['message'] = 'ACL {0} created.'.format(kwargs['name']) else: ret['res'] = False ret['message'] = ('Adding ACL ' '{0} failed.'.format(kwargs['name'])) return ret def acl_delete(consul_url=None, token=None, **kwargs): ''' Delete an ACL token. :param consul_url: The Consul server URL. :param id: Unique identifier for the ACL to update. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.acl_delete id='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'id' not in kwargs: ret['message'] = 'Required parameter "id" is missing.' ret['res'] = False return ret function = 'acl/delete/{0}'.format(kwargs['id']) res = _query(consul_url=consul_url, token=token, data=data, method='PUT', function=function) if res['res']: ret['res'] = True ret['message'] = 'ACL {0} deleted.'.format(kwargs['id']) else: ret['res'] = False ret['message'] = ('Removing ACL ' '{0} failed.'.format(kwargs['id'])) return ret def acl_info(consul_url=None, **kwargs): ''' Information about an ACL token. :param consul_url: The Consul server URL. :param id: Unique identifier for the ACL to update. :return: Information about the ACL requested. CLI Example: .. 
code-block:: bash salt '*' consul.acl_info id='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'id' not in kwargs: ret['message'] = 'Required parameter "id" is missing.' ret['res'] = False return ret function = 'acl/info/{0}'.format(kwargs['id']) ret = _query(consul_url=consul_url, data=data, method='GET', function=function) return ret def acl_clone(consul_url=None, token=None, **kwargs): ''' Information about an ACL token. :param consul_url: The Consul server URL. :param id: Unique identifier for the ACL to update. :return: Boolean, message of success or failure, and new ID of cloned ACL. CLI Example: .. code-block:: bash salt '*' consul.acl_info id='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'id' not in kwargs: ret['message'] = 'Required parameter "id" is missing.' ret['res'] = False return ret function = 'acl/clone/{0}'.format(kwargs['id']) res = _query(consul_url=consul_url, token=token, data=data, method='PUT', function=function) if res['res']: ret['res'] = True ret['message'] = 'ACL {0} cloned.'.format(kwargs['name']) ret['ID'] = ret['data'] else: ret['res'] = False ret['message'] = ('Cloning ACL' 'item {0} failed.'.format(kwargs['name'])) return ret def acl_list(consul_url=None, token=None, **kwargs): ''' List the ACL tokens. :param consul_url: The Consul server URL. :return: List of ACLs CLI Example: .. code-block:: bash salt '*' consul.acl_list ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if 'id' not in kwargs: ret['message'] = 'Required parameter "id" is missing.' ret['res'] = False return ret function = 'acl/list' ret = _query(consul_url=consul_url, token=token, data=data, method='PUT', function=function) return ret def event_fire(consul_url=None, token=None, name=None, **kwargs): ''' List the ACL tokens. :param consul_url: The Consul server URL. :param name: The name of the event to fire. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param node: Filter by node name. :param service: Filter by service name. :param tag: Filter by tag name. :return: List of ACLs CLI Example: .. code-block:: bash salt '*' consul.event_fire name='deploy' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not name: raise SaltInvocationError('Required argument "name" is missing.') if 'dc' in kwargs: query_params = kwargs['dc'] if 'node' in kwargs: query_params = kwargs['node'] if 'service' in kwargs: query_params = kwargs['service'] if 'tag' in kwargs: query_params = kwargs['tag'] function = 'event/fire/{0}'.format(name) res = _query(consul_url=consul_url, token=token, query_params=query_params, method='PUT', function=function) if res['res']: ret['res'] = True ret['message'] = 'Event {0} fired.'.format(name) ret['data'] = ret['data'] else: ret['res'] = False ret['message'] = ('Cloning ACL' 'item {0} failed.'.format(kwargs['name'])) return ret def event_list(consul_url=None, token=None, **kwargs): ''' List the recent events. :param consul_url: The Consul server URL. :param name: The name of the event to fire. :return: List of ACLs CLI Example: .. 
code-block:: bash salt '*' consul.event_list ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'name' in kwargs: query_params = kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') function = 'event/list/' ret = _query(consul_url=consul_url, token=token, query_params=query_params, function=function) return ret
saltstack/salt
salt/modules/consul.py
agent_checks
python
def agent_checks(consul_url=None, token=None): ''' Returns the checks the local agent is managing :param consul_url: The Consul server URL. :return: Returns the checks the local agent is managing CLI Example: .. code-block:: bash salt '*' consul.agent_checks ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'agent/checks' ret = _query(consul_url=consul_url, function=function, token=token, method='GET') return ret
Returns the checks the local agent is managing :param consul_url: The Consul server URL. :return: Returns the checks the local agent is managing CLI Example: .. code-block:: bash salt '*' consul.agent_checks
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/consul.py#L409-L437
[ "def _query(function,\n consul_url,\n token=None,\n method='GET',\n api_version='v1',\n data=None,\n query_params=None):\n '''\n Consul object method function to construct and execute on the API URL.\n\n :param api_url: The Consul api url.\n :param api_version The Consul api version\n :param function: The Consul api function to perform.\n :param method: The HTTP method, e.g. GET or POST.\n :param data: The data to be sent for POST method. This param is ignored for GET requests.\n :return: The json response from the API call or False.\n '''\n\n if not query_params:\n query_params = {}\n\n ret = {'data': '',\n 'res': True}\n\n if not token:\n token = _get_token()\n\n headers = {\"X-Consul-Token\": token, \"Content-Type\": \"application/json\"}\n base_url = urllib.parse.urljoin(consul_url, '{0}/'.format(api_version))\n url = urllib.parse.urljoin(base_url, function, False)\n\n if method == 'GET':\n data = None\n else:\n if data is None:\n data = {}\n data = salt.utils.json.dumps(data)\n\n result = salt.utils.http.query(\n url,\n method=method,\n params=query_params,\n data=data,\n decode=True,\n status=True,\n header_dict=headers,\n opts=__opts__,\n )\n\n if result.get('status', None) == http_client.OK:\n ret['data'] = result.get('dict', result)\n ret['res'] = True\n elif result.get('status', None) == http_client.NO_CONTENT:\n ret['res'] = False\n elif result.get('status', None) == http_client.NOT_FOUND:\n ret['data'] = 'Key not found.'\n ret['res'] = False\n else:\n if result:\n ret['data'] = result\n ret['res'] = True\n else:\n ret['res'] = False\n return ret\n", "def _get_config():\n '''\n Retrieve Consul configuration\n '''\n return __salt__['config.get']('consul.url') or \\\n __salt__['config.get']('consul:url')\n" ]
# -*- coding: utf-8 -*- ''' Interact with Consul https://www.consul.io ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import base64 import logging # Import salt libs import salt.utils.http import salt.utils.json # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import http_client, urllib log = logging.getLogger(__name__) from salt.exceptions import SaltInvocationError # Don't shadow built-ins. __func_alias__ = { 'list_': 'list' } __virtualname__ = 'consul' def _get_config(): ''' Retrieve Consul configuration ''' return __salt__['config.get']('consul.url') or \ __salt__['config.get']('consul:url') def _get_token(): ''' Retrieve Consul configuration ''' return __salt__['config.get']('consul.token') or \ __salt__['config.get']('consul:token') def _query(function, consul_url, token=None, method='GET', api_version='v1', data=None, query_params=None): ''' Consul object method function to construct and execute on the API URL. :param api_url: The Consul api url. :param api_version The Consul api version :param function: The Consul api function to perform. :param method: The HTTP method, e.g. GET or POST. :param data: The data to be sent for POST method. This param is ignored for GET requests. :return: The json response from the API call or False. 
''' if not query_params: query_params = {} ret = {'data': '', 'res': True} if not token: token = _get_token() headers = {"X-Consul-Token": token, "Content-Type": "application/json"} base_url = urllib.parse.urljoin(consul_url, '{0}/'.format(api_version)) url = urllib.parse.urljoin(base_url, function, False) if method == 'GET': data = None else: if data is None: data = {} data = salt.utils.json.dumps(data) result = salt.utils.http.query( url, method=method, params=query_params, data=data, decode=True, status=True, header_dict=headers, opts=__opts__, ) if result.get('status', None) == http_client.OK: ret['data'] = result.get('dict', result) ret['res'] = True elif result.get('status', None) == http_client.NO_CONTENT: ret['res'] = False elif result.get('status', None) == http_client.NOT_FOUND: ret['data'] = 'Key not found.' ret['res'] = False else: if result: ret['data'] = result ret['res'] = True else: ret['res'] = False return ret def list_(consul_url=None, token=None, key=None, **kwargs): ''' List keys in Consul :param consul_url: The Consul server URL. :param key: The key to use as the starting point for the list. :return: The list of keys. CLI Example: .. code-block:: bash salt '*' consul.list salt '*' consul.list key='web' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret query_params = {} if 'recurse' in kwargs: query_params['recurse'] = 'True' # No key so recurse and show all values if not key: query_params['recurse'] = 'True' function = 'kv/' else: function = 'kv/{0}'.format(key) query_params['keys'] = 'True' query_params['separator'] = '/' ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def get(consul_url=None, key=None, token=None, recurse=False, decode=False, raw=False): ''' Get key from Consul :param consul_url: The Consul server URL. 
:param key: The key to use as the starting point for the list. :param recurse: Return values recursively beginning at the value of key. :param decode: By default values are stored as Base64 encoded values, decode will return the whole key with the value decoded. :param raw: Simply return the decoded value of the key. :return: The keys in Consul. CLI Example: .. code-block:: bash salt '*' consul.get key='web/key1' salt '*' consul.get key='web' recurse=True salt '*' consul.get key='web' recurse=True decode=True By default values stored in Consul are base64 encoded, passing the decode option will show them as the decoded values. .. code-block:: bash salt '*' consul.get key='web' recurse=True decode=True raw=True By default Consult will return other information about the key, the raw option will return only the raw value. ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not key: raise SaltInvocationError('Required argument "key" is missing.') query_params = {} function = 'kv/{0}'.format(key) if recurse: query_params['recurse'] = 'True' if raw: query_params['raw'] = True ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) if ret['res']: if decode: for item in ret['data']: if item['Value'] is None: item['Value'] = "" else: item['Value'] = base64.b64decode(item['Value']) return ret def put(consul_url=None, token=None, key=None, value=None, **kwargs): ''' Put values into Consul :param consul_url: The Consul server URL. :param key: The key to use as the starting point for the list. :param value: The value to set the key to. :param flags: This can be used to specify an unsigned value between 0 and 2^64-1. Clients can choose to use this however makes sense for their application. :param cas: This flag is used to turn the PUT into a Check-And-Set operation. 
:param acquire: This flag is used to turn the PUT into a lock acquisition operation. :param release: This flag is used to turn the PUT into a lock release operation. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.put key='web/key1' value="Hello there" salt '*' consul.put key='web/key1' value="Hello there" acquire='d5d371f4-c380-5280-12fd-8810be175592' salt '*' consul.put key='web/key1' value="Hello there" release='d5d371f4-c380-5280-12fd-8810be175592' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not key: raise SaltInvocationError('Required argument "key" is missing.') # Invalid to specified these together conflicting_args = ['cas', 'release', 'acquire'] for _l1 in conflicting_args: for _l2 in conflicting_args: if _l1 in kwargs and _l2 in kwargs and _l1 != _l2: raise SaltInvocationError('Using arguments `{0}` and `{1}`' ' together is invalid.'.format(_l1, _l2)) query_params = {} available_sessions = session_list(consul_url=consul_url, return_list=True) _current = get(consul_url=consul_url, key=key) if 'flags' in kwargs: if kwargs['flags'] >= 0 and kwargs['flags'] <= 2**64: query_params['flags'] = kwargs['flags'] if 'cas' in kwargs: if _current['res']: if kwargs['cas'] == 0: ret['message'] = ('Key {0} exists, index ' 'must be non-zero.'.format(key)) ret['res'] = False return ret if kwargs['cas'] != _current['data']['ModifyIndex']: ret['message'] = ('Key {0} exists, but indexes ' 'do not match.'.format(key)) ret['res'] = False return ret query_params['cas'] = kwargs['cas'] else: ret['message'] = ('Key {0} does not exists, ' 'CAS argument can not be used.'.format(key)) ret['res'] = False return ret if 'acquire' in kwargs: if kwargs['acquire'] not in available_sessions: ret['message'] = '{0} is not a valid session.'.format(kwargs['acquire']) ret['res'] = False return ret 
query_params['acquire'] = kwargs['acquire'] if 'release' in kwargs: if _current['res']: if 'Session' in _current['data']: if _current['data']['Session'] == kwargs['release']: query_params['release'] = kwargs['release'] else: ret['message'] = '{0} locked by another session.'.format(key) ret['res'] = False return ret else: ret['message'] = '{0} is not a valid session.'.format(kwargs['acquire']) ret['res'] = False else: log.error('Key {0} does not exist. Skipping release.') data = value function = 'kv/{0}'.format(key) method = 'PUT' ret = _query(consul_url=consul_url, token=token, function=function, method=method, data=data, query_params=query_params) if ret['res']: ret['res'] = True ret['data'] = 'Added key {0} with value {1}.'.format(key, value) else: ret['res'] = False ret['data'] = 'Unable to add key {0} with value {1}.'.format(key, value) return ret def delete(consul_url=None, token=None, key=None, **kwargs): ''' Delete values from Consul :param consul_url: The Consul server URL. :param key: The key to use as the starting point for the list. :param recurse: Delete values recursively beginning at the value of key. :param cas: This flag is used to turn the DELETE into a Check-And-Set operation. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.delete key='web' salt '*' consul.delete key='web' recurse='True' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if not key: raise SaltInvocationError('Required argument "key" is missing.') query_params = {} if 'recurse' in kwargs: query_params['recurse'] = True if 'cas' in kwargs: if kwargs['cas'] > 0: query_params['cas'] = kwargs['cas'] else: ret['message'] = ('Check and Set Operation ', 'value must be greater than 0.') ret['res'] = False return ret function = 'kv/{0}'.format(key) ret = _query(consul_url=consul_url, token=token, function=function, method='DELETE', query_params=query_params) if ret['res']: ret['res'] = True ret['message'] = 'Deleted key {0}.'.format(key) else: ret['res'] = False ret['message'] = 'Unable to delete key {0}.'.format(key) return ret def agent_services(consul_url=None, token=None): ''' Returns the services the local agent is managing :param consul_url: The Consul server URL. :return: Returns the services the local agent is managing CLI Example: .. code-block:: bash salt '*' consul.agent_services ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'agent/services' ret = _query(consul_url=consul_url, function=function, token=token, method='GET') return ret def agent_members(consul_url=None, token=None, **kwargs): ''' Returns the members as seen by the local serf agent :param consul_url: The Consul server URL. :return: Returns the members as seen by the local serf agent CLI Example: .. code-block:: bash salt '*' consul.agent_members ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if 'wan' in kwargs: query_params['wan'] = kwargs['wan'] function = 'agent/members' ret = _query(consul_url=consul_url, function=function, token=token, method='GET', query_params=query_params) return ret def agent_self(consul_url=None, token=None): ''' Returns the local node configuration :param consul_url: The Consul server URL. :return: Returns the local node configuration CLI Example: .. code-block:: bash salt '*' consul.agent_self ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'agent/self' ret = _query(consul_url=consul_url, function=function, token=token, method='GET', query_params=query_params) return ret def agent_maintenance(consul_url=None, token=None, **kwargs): ''' Manages node maintenance mode :param consul_url: The Consul server URL. :param enable: The enable flag is required. Acceptable values are either true (to enter maintenance mode) or false (to resume normal operation). :param reason: If provided, its value should be a text string explaining the reason for placing the node into maintenance mode. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_maintenance enable='False' reason='Upgrade in progress' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'enable' in kwargs: query_params['enable'] = kwargs['enable'] else: ret['message'] = 'Required parameter "enable" is missing.' 
ret['res'] = False return ret if 'reason' in kwargs: query_params['reason'] = kwargs['reason'] function = 'agent/maintenance' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', query_params=query_params) if res['res']: ret['res'] = True ret['message'] = ('Agent maintenance mode ' '{0}ed.'.format(kwargs['enable'])) else: ret['res'] = True ret['message'] = 'Unable to change maintenance mode for agent.' return ret def agent_join(consul_url=None, token=None, address=None, **kwargs): ''' Triggers the local agent to join a node :param consul_url: The Consul server URL. :param address: The address for the agent to connect to. :param wan: Causes the agent to attempt to join using the WAN pool. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_join address='192.168.1.1' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not address: raise SaltInvocationError('Required argument "address" is missing.') if 'wan' in kwargs: query_params['wan'] = kwargs['wan'] function = 'agent/join/{0}'.format(address) res = _query(consul_url=consul_url, function=function, token=token, method='GET', query_params=query_params) if res['res']: ret['res'] = True ret['message'] = 'Agent joined the cluster' else: ret['res'] = False ret['message'] = 'Unable to join the cluster.' return ret def agent_leave(consul_url=None, token=None, node=None): ''' Used to instruct the agent to force a node into the left state. :param consul_url: The Consul server URL. :param node: The node the agent will force into left state :return: Boolean and message indicating success or failure. CLI Example: .. 
code-block:: bash salt '*' consul.agent_leave node='web1.example.com' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not node: raise SaltInvocationError('Required argument "node" is missing.') function = 'agent/force-leave/{0}'.format(node) res = _query(consul_url=consul_url, function=function, token=token, method='GET', query_params=query_params) if res['res']: ret['res'] = True ret['message'] = 'Node {0} put in leave state.'.format(node) else: ret['res'] = False ret['message'] = 'Unable to change state for {0}.'.format(node) return ret def agent_check_register(consul_url=None, token=None, **kwargs): ''' The register endpoint is used to add a new check to the local agent. :param consul_url: The Consul server URL. :param name: The description of what the check is for. :param id: The unique name to use for the check, if not provided 'name' is used. :param notes: Human readable description of the check. :param script: If script is provided, the check type is a script, and Consul will evaluate that script based on the interval parameter. :param http: Check will perform an HTTP GET request against the value of HTTP (expected to be a URL) based on the interval parameter. :param ttl: If a TTL type is used, then the TTL update endpoint must be used periodically to update the state of the check. :param interval: Interval at which the check should run. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_check_register name='Memory Utilization' script='/usr/local/bin/check_mem.py' interval='15s' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if 'name' in kwargs: data['Name'] = kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') if True not in [True for item in ('script', 'http', 'ttl') if item in kwargs]: ret['message'] = 'Required parameter "script" or "http" is missing.' ret['res'] = False return ret if 'id' in kwargs: data['ID'] = kwargs['id'] if 'notes' in kwargs: data['Notes'] = kwargs['notes'] if 'script' in kwargs: if 'interval' not in kwargs: ret['message'] = 'Required parameter "interval" is missing.' ret['res'] = False return ret data['Script'] = kwargs['script'] data['Interval'] = kwargs['interval'] if 'http' in kwargs: if 'interval' not in kwargs: ret['message'] = 'Required parameter "interval" is missing.' ret['res'] = False return ret data['HTTP'] = kwargs['http'] data['Interval'] = kwargs['interval'] if 'ttl' in kwargs: data['TTL'] = kwargs['ttl'] function = 'agent/check/register' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = ('Check {0} added to agent.'.format(kwargs['name'])) else: ret['res'] = False ret['message'] = 'Unable to add check to agent.' return ret def agent_check_deregister(consul_url=None, token=None, checkid=None): ''' The agent will take care of deregistering the check from the Catalog. :param consul_url: The Consul server URL. :param checkid: The ID of the check to deregister from Consul. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_check_deregister checkid='Memory Utilization' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if not checkid: raise SaltInvocationError('Required argument "checkid" is missing.') function = 'agent/check/deregister/{0}'.format(checkid) res = _query(consul_url=consul_url, function=function, token=token, method='GET') if res['res']: ret['res'] = True ret['message'] = ('Check {0} removed from agent.'.format(checkid)) else: ret['res'] = False ret['message'] = 'Unable to remove check from agent.' return ret def agent_check_pass(consul_url=None, token=None, checkid=None, **kwargs): ''' This endpoint is used with a check that is of the TTL type. When this is called, the status of the check is set to passing and the TTL clock is reset. :param consul_url: The Consul server URL. :param checkid: The ID of the check to mark as passing. :param note: A human-readable message with the status of the check. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_check_pass checkid='redis_check1' note='Forcing check into passing state.' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not checkid: raise SaltInvocationError('Required argument "checkid" is missing.') if 'note' in kwargs: query_params['note'] = kwargs['note'] function = 'agent/check/pass/{0}'.format(checkid) res = _query(consul_url=consul_url, function=function, token=token, query_params=query_params, method='GET') if res['res']: ret['res'] = True ret['message'] = 'Check {0} marked as passing.'.format(checkid) else: ret['res'] = False ret['message'] = 'Unable to update check {0}.'.format(checkid) return ret def agent_check_warn(consul_url=None, token=None, checkid=None, **kwargs): ''' This endpoint is used with a check that is of the TTL type. When this is called, the status of the check is set to warning and the TTL clock is reset. 
:param consul_url: The Consul server URL. :param checkid: The ID of the check to deregister from Consul. :param note: A human-readable message with the status of the check. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_check_warn checkid='redis_check1' note='Forcing check into warning state.' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not checkid: raise SaltInvocationError('Required argument "checkid" is missing.') if 'note' in kwargs: query_params['note'] = kwargs['note'] function = 'agent/check/warn/{0}'.format(checkid) res = _query(consul_url=consul_url, function=function, token=token, query_params=query_params, method='GET') if res['res']: ret['res'] = True ret['message'] = 'Check {0} marked as warning.'.format(checkid) else: ret['res'] = False ret['message'] = 'Unable to update check {0}.'.format(checkid) return ret def agent_check_fail(consul_url=None, token=None, checkid=None, **kwargs): ''' This endpoint is used with a check that is of the TTL type. When this is called, the status of the check is set to critical and the TTL clock is reset. :param consul_url: The Consul server URL. :param checkid: The ID of the check to deregister from Consul. :param note: A human-readable message with the status of the check. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_check_fail checkid='redis_check1' note='Forcing check into critical state.' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if not checkid: raise SaltInvocationError('Required argument "checkid" is missing.') if 'note' in kwargs: query_params['note'] = kwargs['note'] function = 'agent/check/fail/{0}'.format(checkid) res = _query(consul_url=consul_url, function=function, token=token, query_params=query_params, method='GET') if res['res']: ret['res'] = True ret['message'] = 'Check {0} marked as critical.'.format(checkid) else: ret['res'] = False ret['message'] = 'Unable to update check {0}.'.format(checkid) return ret def agent_service_register(consul_url=None, token=None, **kwargs): ''' The used to add a new service, with an optional health check, to the local agent. :param consul_url: The Consul server URL. :param name: A name describing the service. :param address: The address used by the service, defaults to the address of the agent. :param port: The port used by the service. :param id: Unique ID to identify the service, if not provided the value of the name parameter is used. :param tags: Identifying tags for service, string or list. :param script: If script is provided, the check type is a script, and Consul will evaluate that script based on the interval parameter. :param http: Check will perform an HTTP GET request against the value of HTTP (expected to be a URL) based on the interval parameter. :param check_ttl: If a TTL type is used, then the TTL update endpoint must be used periodically to update the state of the check. :param check_interval: Interval at which the check should run. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_service_register name='redis' tags='["master", "v1"]' address="127.0.0.1" port="8080" check_script="/usr/local/bin/check_redis.py" interval="10s" ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret lc_kwargs = dict() for k, v in six.iteritems(kwargs): lc_kwargs[k.lower()] = v if 'name' in lc_kwargs: data['Name'] = lc_kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') if 'address' in lc_kwargs: data['Address'] = lc_kwargs['address'] if 'port' in lc_kwargs: data['Port'] = lc_kwargs['port'] if 'id' in lc_kwargs: data['ID'] = lc_kwargs['id'] if 'tags' in lc_kwargs: _tags = lc_kwargs['tags'] if not isinstance(_tags, list): _tags = [_tags] data['Tags'] = _tags if 'enabletagoverride' in lc_kwargs: data['EnableTagOverride'] = lc_kwargs['enabletagoverride'] if 'check' in lc_kwargs: dd = dict() for k, v in six.iteritems(lc_kwargs['check']): dd[k.lower()] = v interval_required = False check_dd = dict() if 'script' in dd: interval_required = True check_dd['Script'] = dd['script'] if 'http' in dd: interval_required = True check_dd['HTTP'] = dd['http'] if 'ttl' in dd: check_dd['TTL'] = dd['ttl'] if 'interval' in dd: check_dd['Interval'] = dd['interval'] if interval_required: if 'Interval' not in check_dd: ret['message'] = 'Required parameter "interval" is missing.' ret['res'] = False return ret else: if 'Interval' in check_dd: del check_dd['Interval'] # not required, so ignore it if check_dd > 0: data['Check'] = check_dd # if empty, ignore it function = 'agent/service/register' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = 'Service {0} registered on agent.'.format(kwargs['name']) else: ret['res'] = False ret['message'] = 'Unable to register service {0}.'.format(kwargs['name']) return ret def agent_service_deregister(consul_url=None, token=None, serviceid=None): ''' Used to remove a service. :param consul_url: The Consul server URL. :param serviceid: A serviceid describing the service. :return: Boolean and message indicating success or failure. CLI Example: .. 
code-block:: bash salt '*' consul.agent_service_deregister serviceid='redis' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not serviceid: raise SaltInvocationError('Required argument "serviceid" is missing.') function = 'agent/service/deregister/{0}'.format(serviceid) res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = 'Service {0} removed from agent.'.format(serviceid) else: ret['res'] = False ret['message'] = 'Unable to remove service {0}.'.format(serviceid) return ret def agent_service_maintenance(consul_url=None, token=None, serviceid=None, **kwargs): ''' Used to place a service into maintenance mode. :param consul_url: The Consul server URL. :param serviceid: A name of the service. :param enable: Whether the service should be enabled or disabled. :param reason: A human readable message of why the service was enabled or disabled. :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.agent_service_deregister serviceid='redis' enable='True' reason='Down for upgrade' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not serviceid: raise SaltInvocationError('Required argument "serviceid" is missing.') if 'enable' in kwargs: query_params['enable'] = kwargs['enable'] else: ret['message'] = 'Required parameter "enable" is missing.' 
ret['res'] = False return ret if 'reason' in kwargs: query_params['reason'] = kwargs['reason'] function = 'agent/service/maintenance/{0}'.format(serviceid) res = _query(consul_url=consul_url, token=token, function=function, query_params=query_params) if res['res']: ret['res'] = True ret['message'] = ('Service {0} set in ' 'maintenance mode.'.format(serviceid)) else: ret['res'] = False ret['message'] = ('Unable to set service ' '{0} to maintenance mode.'.format(serviceid)) return ret def session_create(consul_url=None, token=None, **kwargs): ''' Used to create a session. :param consul_url: The Consul server URL. :param lockdelay: Duration string using a "s" suffix for seconds. The default is 15s. :param node: Must refer to a node that is already registered, if specified. By default, the agent's own node name is used. :param name: A human-readable name for the session :param checks: A list of associated health checks. It is highly recommended that, if you override this list, you include the default "serfHealth". :param behavior: Can be set to either release or delete. This controls the behavior when a session is invalidated. By default, this is release, causing any locks that are held to be released. Changing this to delete causes any locks that are held to be deleted. delete is useful for creating ephemeral key/value entries. :param ttl: Session is invalidated if it is not renewed before the TTL expires :return: Boolean and message indicating success or failure. CLI Example: .. code-block:: bash salt '*' consul.session_create node='node1' name='my-session' behavior='delete' ttl='3600s' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret data = {} if 'lockdelay' in kwargs: data['LockDelay'] = kwargs['lockdelay'] if 'node' in kwargs: data['Node'] = kwargs['node'] if 'name' in kwargs: data['Name'] = kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') if 'checks' in kwargs: data['Touch'] = kwargs['touch'] if 'behavior' in kwargs: if not kwargs['behavior'] in ('delete', 'release'): ret['message'] = ('Behavior must be ', 'either delete or release.') ret['res'] = False return ret data['Behavior'] = kwargs['behavior'] if 'ttl' in kwargs: _ttl = kwargs['ttl'] if six.text_type(_ttl).endswith('s'): _ttl = _ttl[:-1] if int(_ttl) < 0 or int(_ttl) > 3600: ret['message'] = ('TTL must be ', 'between 0 and 3600.') ret['res'] = False return ret data['TTL'] = '{0}s'.format(_ttl) function = 'session/create' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = 'Created session {0}.'.format(kwargs['name']) else: ret['res'] = False ret['message'] = 'Unable to create session {0}.'.format(kwargs['name']) return ret def session_list(consul_url=None, token=None, return_list=False, **kwargs): ''' Used to list sessions. :param consul_url: The Consul server URL. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param return_list: By default, all information about the sessions is returned, using the return_list parameter will return a list of session IDs. :return: A list of all available sessions. CLI Example: .. code-block:: bash salt '*' consul.session_list ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret query_params = {} if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'session/list' ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) if return_list: _list = [] for item in ret['data']: _list.append(item['ID']) return _list return ret def session_destroy(consul_url=None, token=None, session=None, **kwargs): ''' Destroy session :param consul_url: The Consul server URL. :param session: The ID of the session to destroy. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.session_destroy session='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not session: raise SaltInvocationError('Required argument "session" is missing.') query_params = {} if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'session/destroy/{0}'.format(session) res = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) if res['res']: ret['res'] = True ret['message'] = 'Created Service {0}.'.format(kwargs['name']) else: ret['res'] = False ret['message'] = 'Unable to create service {0}.'.format(kwargs['name']) return ret def session_info(consul_url=None, token=None, session=None, **kwargs): ''' Information about a session :param consul_url: The Consul server URL. :param session: The ID of the session to return information about. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: Boolean & message of success or failure. CLI Example: .. 
code-block:: bash salt '*' consul.session_info session='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not session: raise SaltInvocationError('Required argument "session" is missing.') query_params = {} if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'session/info/{0}'.format(session) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def catalog_register(consul_url=None, token=None, **kwargs): ''' Registers a new node, service, or check :param consul_url: The Consul server URL. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param node: The node to register. :param address: The address of the node. :param service: The service that will be registered. :param service_address: The address that the service listens on. :param service_port: The port for the service. :param service_id: A unique identifier for the service, if this is not provided "name" will be used. :param service_tags: Any tags associated with the service. :param check: The name of the health check to register :param check_status: The initial status of the check, must be one of unknown, passing, warning, or critical. :param check_service: The service that the check is performed against. :param check_id: Unique identifier for the service. :param check_notes: An opaque field that is meant to hold human-readable text. :return: Boolean & message of success or failure. CLI Example: .. 
code-block:: bash salt '*' consul.catalog_register node='node1' address='192.168.1.1' service='redis' service_address='127.0.0.1' service_port='8080' service_id='redis_server1' ''' ret = {} data = {} data['NodeMeta'] = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'datacenter' in kwargs: data['Datacenter'] = kwargs['datacenter'] if 'node' in kwargs: data['Node'] = kwargs['node'] else: ret['message'] = 'Required argument node argument is missing.' ret['res'] = False return ret if 'address' in kwargs: if isinstance(kwargs['address'], list): _address = kwargs['address'][0] else: _address = kwargs['address'] data['Address'] = _address else: ret['message'] = 'Required argument address argument is missing.' ret['res'] = False return ret if 'ip_interfaces' in kwargs: data['TaggedAddresses'] = {} for k in kwargs['ip_interfaces']: if kwargs['ip_interfaces'].get(k): data['TaggedAddresses'][k] = kwargs['ip_interfaces'][k][0] if 'service' in kwargs: data['Service'] = {} data['Service']['Service'] = kwargs['service'] if 'service_address' in kwargs: data['Service']['Address'] = kwargs['service_address'] if 'service_port' in kwargs: data['Service']['Port'] = kwargs['service_port'] if 'service_id' in kwargs: data['Service']['ID'] = kwargs['service_id'] if 'service_tags' in kwargs: _tags = kwargs['service_tags'] if not isinstance(_tags, list): _tags = [_tags] data['Service']['Tags'] = _tags if 'cpu' in kwargs: data['NodeMeta']['Cpu'] = kwargs['cpu'] if 'num_cpus' in kwargs: data['NodeMeta']['Cpu_num'] = kwargs['num_cpus'] if 'mem' in kwargs: data['NodeMeta']['Memory'] = kwargs['mem'] if 'oscode' in kwargs: data['NodeMeta']['Os'] = kwargs['oscode'] if 'osarch' in kwargs: data['NodeMeta']['Osarch'] = kwargs['osarch'] if 'kernel' in kwargs: data['NodeMeta']['Kernel'] = kwargs['kernel'] if 'kernelrelease' in kwargs: data['NodeMeta']['Kernelrelease'] = 
kwargs['kernelrelease'] if 'localhost' in kwargs: data['NodeMeta']['localhost'] = kwargs['localhost'] if 'nodename' in kwargs: data['NodeMeta']['nodename'] = kwargs['nodename'] if 'os_family' in kwargs: data['NodeMeta']['os_family'] = kwargs['os_family'] if 'lsb_distrib_description' in kwargs: data['NodeMeta']['lsb_distrib_description'] = kwargs['lsb_distrib_description'] if 'master' in kwargs: data['NodeMeta']['master'] = kwargs['master'] if 'check' in kwargs: data['Check'] = {} data['Check']['Name'] = kwargs['check'] if 'check_status' in kwargs: if kwargs['check_status'] not in ('unknown', 'passing', 'warning', 'critical'): ret['message'] = 'Check status must be unknown, passing, warning, or critical.' ret['res'] = False return ret data['Check']['Status'] = kwargs['check_status'] if 'check_service' in kwargs: data['Check']['ServiceID'] = kwargs['check_service'] if 'check_id' in kwargs: data['Check']['CheckID'] = kwargs['check_id'] if 'check_notes' in kwargs: data['Check']['Notes'] = kwargs['check_notes'] function = 'catalog/register' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = ('Catalog registration ' 'for {0} successful.'.format(kwargs['node'])) else: ret['res'] = False ret['message'] = ('Catalog registration ' 'for {0} failed.'.format(kwargs['node'])) ret['data'] = data return ret def catalog_deregister(consul_url=None, token=None, **kwargs): ''' Deregisters a node, service, or check :param consul_url: The Consul server URL. :param node: The node to deregister. :param datacenter: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param checkid: The ID of the health check to deregister. :param serviceid: The ID of the service to deregister. :return: Boolean & message of success or failure. CLI Example: .. 
code-block:: bash salt '*' consul.catalog_register node='node1' serviceid='redis_server1' checkid='redis_check1' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'datacenter' in kwargs: data['Datacenter'] = kwargs['datacenter'] if 'node' in kwargs: data['Node'] = kwargs['node'] else: ret['message'] = 'Node argument required.' ret['res'] = False return ret if 'checkid' in kwargs: data['CheckID'] = kwargs['checkid'] if 'serviceid' in kwargs: data['ServiceID'] = kwargs['serviceid'] function = 'catalog/deregister' res = _query(consul_url=consul_url, function=function, token=token, method='PUT', data=data) if res['res']: ret['res'] = True ret['message'] = 'Catalog item {0} removed.'.format(kwargs['node']) else: ret['res'] = False ret['message'] = ('Removing Catalog ' 'item {0} failed.'.format(kwargs['node'])) return ret def catalog_datacenters(consul_url=None, token=None): ''' Return list of available datacenters from catalog. :param consul_url: The Consul server URL. :return: The list of available datacenters. CLI Example: .. code-block:: bash salt '*' consul.catalog_datacenters ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'catalog/datacenters' ret = _query(consul_url=consul_url, function=function, token=token) return ret def catalog_nodes(consul_url=None, token=None, **kwargs): ''' Return list of available nodes from catalog. :param consul_url: The Consul server URL. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: The list of available nodes. CLI Example: .. 
code-block:: bash salt '*' consul.catalog_nodes ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'catalog/nodes' ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def catalog_services(consul_url=None, token=None, **kwargs): ''' Return list of available services rom catalog. :param consul_url: The Consul server URL. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: The list of available services. CLI Example: .. code-block:: bash salt '*' consul.catalog_services ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'catalog/services' ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def catalog_service(consul_url=None, token=None, service=None, **kwargs): ''' Information about the registered service. :param consul_url: The Consul server URL. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param tag: Filter returned services with tag parameter. :return: Information about the requested service. CLI Example: .. code-block:: bash salt '*' consul.catalog_service service='redis' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if not service: raise SaltInvocationError('Required argument "service" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] if 'tag' in kwargs: query_params['tag'] = kwargs['tag'] function = 'catalog/service/{0}'.format(service) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def catalog_node(consul_url=None, token=None, node=None, **kwargs): ''' Information about the registered node. :param consul_url: The Consul server URL. :param node: The node to request information about. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: Information about the requested node. CLI Example: .. code-block:: bash salt '*' consul.catalog_service service='redis' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not node: raise SaltInvocationError('Required argument "node" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'catalog/node/{0}'.format(node) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def health_node(consul_url=None, token=None, node=None, **kwargs): ''' Health information about the registered node. :param consul_url: The Consul server URL. :param node: The node to request health information about. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: Health information about the requested node. CLI Example: .. code-block:: bash salt '*' consul.health_node node='node1' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if not node: raise SaltInvocationError('Required argument "node" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'health/node/{0}'.format(node) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def health_checks(consul_url=None, token=None, service=None, **kwargs): ''' Health information about the registered service. :param consul_url: The Consul server URL. :param service: The service to request health information about. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: Health information about the requested node. CLI Example: .. code-block:: bash salt '*' consul.health_checks service='redis1' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not service: raise SaltInvocationError('Required argument "service" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] function = 'health/checks/{0}'.format(service) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def health_service(consul_url=None, token=None, service=None, **kwargs): ''' Health information about the registered service. :param consul_url: The Consul server URL. :param service: The service to request health information about. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param tag: Filter returned services with tag parameter. :param passing: Filter results to only nodes with all checks in the passing state. :return: Health information about the requested node. CLI Example: .. 
code-block:: bash salt '*' consul.health_service service='redis1' salt '*' consul.health_service service='redis1' passing='True' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not service: raise SaltInvocationError('Required argument "service" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] if 'tag' in kwargs: query_params['tag'] = kwargs['tag'] if 'passing' in kwargs: query_params['passing'] = kwargs['passing'] function = 'health/service/{0}'.format(service) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def health_state(consul_url=None, token=None, state=None, **kwargs): ''' Returns the checks in the state provided on the path. :param consul_url: The Consul server URL. :param state: The state to show checks for. The supported states are any, unknown, passing, warning, or critical. The any state is a wildcard that can be used to return all checks. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :return: The checks in the provided state. CLI Example: .. code-block:: bash salt '*' consul.health_state state='redis1' salt '*' consul.health_state service='redis1' passing='True' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not state: raise SaltInvocationError('Required argument "state" is missing.') if 'dc' in kwargs: query_params['dc'] = kwargs['dc'] if state not in ('any', 'unknown', 'passing', 'warning', 'critical'): ret['message'] = 'State must be any, unknown, passing, warning, or critical.' 
ret['res'] = False return ret function = 'health/state/{0}'.format(state) ret = _query(consul_url=consul_url, function=function, token=token, query_params=query_params) return ret def status_leader(consul_url=None, token=None): ''' Returns the current Raft leader :param consul_url: The Consul server URL. :return: The address of the Raft leader. CLI Example: .. code-block:: bash salt '*' consul.status_leader ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'status/leader' ret = _query(consul_url=consul_url, function=function, token=token) return ret def status_peers(consul_url, token=None): ''' Returns the current Raft peer set :param consul_url: The Consul server URL. :return: Retrieves the Raft peers for the datacenter in which the agent is running. CLI Example: .. code-block:: bash salt '*' consul.status_peers ''' ret = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret function = 'status/peers' ret = _query(consul_url=consul_url, function=function, token=token) return ret def acl_create(consul_url=None, token=None, **kwargs): ''' Create a new ACL token. :param consul_url: The Consul server URL. :param name: Meaningful indicator of the ACL's purpose. :param type: Type is either client or management. A management token is comparable to a root user and has the ability to perform any action including creating, modifying, and deleting ACLs. :param rules: The Consul server URL. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.acl_create ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if 'name' in kwargs: data['Name'] = kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') if 'type' in kwargs: data['Type'] = kwargs['type'] if 'rules' in kwargs: data['Rules'] = kwargs['rules'] function = 'acl/create' res = _query(consul_url=consul_url, token=token, data=data, method='PUT', function=function) if res['res']: ret['res'] = True ret['message'] = 'ACL {0} created.'.format(kwargs['name']) else: ret['res'] = False ret['message'] = ('Removing Catalog ' 'item {0} failed.'.format(kwargs['name'])) return ret def acl_update(consul_url=None, token=None, **kwargs): ''' Update an ACL token. :param consul_url: The Consul server URL. :param name: Meaningful indicator of the ACL's purpose. :param id: Unique identifier for the ACL to update. :param type: Type is either client or management. A management token is comparable to a root user and has the ability to perform any action including creating, modifying, and deleting ACLs. :param rules: The Consul server URL. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.acl_update ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'id' in kwargs: data['ID'] = kwargs['id'] else: ret['message'] = 'Required parameter "id" is missing.' 
ret['res'] = False return ret if 'name' in kwargs: data['Name'] = kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') if 'type' in kwargs: data['Type'] = kwargs['type'] if 'rules' in kwargs: data['Rules'] = kwargs['rules'] function = 'acl/update' res = _query(consul_url=consul_url, token=token, data=data, method='PUT', function=function) if res['res']: ret['res'] = True ret['message'] = 'ACL {0} created.'.format(kwargs['name']) else: ret['res'] = False ret['message'] = ('Adding ACL ' '{0} failed.'.format(kwargs['name'])) return ret def acl_delete(consul_url=None, token=None, **kwargs): ''' Delete an ACL token. :param consul_url: The Consul server URL. :param id: Unique identifier for the ACL to update. :return: Boolean & message of success or failure. CLI Example: .. code-block:: bash salt '*' consul.acl_delete id='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'id' not in kwargs: ret['message'] = 'Required parameter "id" is missing.' ret['res'] = False return ret function = 'acl/delete/{0}'.format(kwargs['id']) res = _query(consul_url=consul_url, token=token, data=data, method='PUT', function=function) if res['res']: ret['res'] = True ret['message'] = 'ACL {0} deleted.'.format(kwargs['id']) else: ret['res'] = False ret['message'] = ('Removing ACL ' '{0} failed.'.format(kwargs['id'])) return ret def acl_info(consul_url=None, **kwargs): ''' Information about an ACL token. :param consul_url: The Consul server URL. :param id: Unique identifier for the ACL to update. :return: Information about the ACL requested. CLI Example: .. 
code-block:: bash salt '*' consul.acl_info id='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'id' not in kwargs: ret['message'] = 'Required parameter "id" is missing.' ret['res'] = False return ret function = 'acl/info/{0}'.format(kwargs['id']) ret = _query(consul_url=consul_url, data=data, method='GET', function=function) return ret def acl_clone(consul_url=None, token=None, **kwargs): ''' Information about an ACL token. :param consul_url: The Consul server URL. :param id: Unique identifier for the ACL to update. :return: Boolean, message of success or failure, and new ID of cloned ACL. CLI Example: .. code-block:: bash salt '*' consul.acl_info id='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'id' not in kwargs: ret['message'] = 'Required parameter "id" is missing.' ret['res'] = False return ret function = 'acl/clone/{0}'.format(kwargs['id']) res = _query(consul_url=consul_url, token=token, data=data, method='PUT', function=function) if res['res']: ret['res'] = True ret['message'] = 'ACL {0} cloned.'.format(kwargs['name']) ret['ID'] = ret['data'] else: ret['res'] = False ret['message'] = ('Cloning ACL' 'item {0} failed.'.format(kwargs['name'])) return ret def acl_list(consul_url=None, token=None, **kwargs): ''' List the ACL tokens. :param consul_url: The Consul server URL. :return: List of ACLs CLI Example: .. code-block:: bash salt '*' consul.acl_list ''' ret = {} data = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' 
ret['res'] = False return ret if 'id' not in kwargs: ret['message'] = 'Required parameter "id" is missing.' ret['res'] = False return ret function = 'acl/list' ret = _query(consul_url=consul_url, token=token, data=data, method='PUT', function=function) return ret def event_fire(consul_url=None, token=None, name=None, **kwargs): ''' List the ACL tokens. :param consul_url: The Consul server URL. :param name: The name of the event to fire. :param dc: By default, the datacenter of the agent is queried; however, the dc can be provided using the "dc" parameter. :param node: Filter by node name. :param service: Filter by service name. :param tag: Filter by tag name. :return: List of ACLs CLI Example: .. code-block:: bash salt '*' consul.event_fire name='deploy' ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if not name: raise SaltInvocationError('Required argument "name" is missing.') if 'dc' in kwargs: query_params = kwargs['dc'] if 'node' in kwargs: query_params = kwargs['node'] if 'service' in kwargs: query_params = kwargs['service'] if 'tag' in kwargs: query_params = kwargs['tag'] function = 'event/fire/{0}'.format(name) res = _query(consul_url=consul_url, token=token, query_params=query_params, method='PUT', function=function) if res['res']: ret['res'] = True ret['message'] = 'Event {0} fired.'.format(name) ret['data'] = ret['data'] else: ret['res'] = False ret['message'] = ('Cloning ACL' 'item {0} failed.'.format(kwargs['name'])) return ret def event_list(consul_url=None, token=None, **kwargs): ''' List the recent events. :param consul_url: The Consul server URL. :param name: The name of the event to fire. :return: List of ACLs CLI Example: .. 
code-block:: bash salt '*' consul.event_list ''' ret = {} query_params = {} if not consul_url: consul_url = _get_config() if not consul_url: log.error('No Consul URL found.') ret['message'] = 'No Consul URL found.' ret['res'] = False return ret if 'name' in kwargs: query_params = kwargs['name'] else: raise SaltInvocationError('Required argument "name" is missing.') function = 'event/list/' ret = _query(consul_url=consul_url, token=token, query_params=query_params, function=function) return ret