repository_name
stringclasses
316 values
func_path_in_repository
stringlengths
6
223
func_name
stringlengths
1
134
language
stringclasses
1 value
func_code_string
stringlengths
57
65.5k
func_documentation_string
stringlengths
1
46.3k
split_name
stringclasses
1 value
func_code_url
stringlengths
91
315
called_functions
listlengths
1
156
enclosing_scope
stringlengths
2
1.48M
saltstack/salt
salt/modules/opkg.py
owner
python
def owner(*paths, **kwargs):  # pylint: disable=unused-argument
    '''
    Return the name of the package that owns the file. Multiple file paths can
    be passed. Like :mod:`pkg.version <salt.modules.opkg.version>`, if a
    single path is passed, a string will be returned, and if multiple paths
    are passed, a dictionary of file/package name pairs will be returned.

    If the file is not owned by a package, or is not present on the minion,
    then an empty string will be returned for that path.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.owner /usr/bin/apachectl
        salt '*' pkg.owner /usr/bin/apachectl /usr/bin/basename
    '''
    if not paths:
        # Nothing to look up; mirror the single-path "not found" return type.
        return ''
    ret = {}
    cmd_search = ['opkg', 'search']
    for path in paths:
        # 'opkg search <path>' prints '<pkgname> - <version> ...' when the
        # file belongs to an installed package, and nothing otherwise.
        cmd = cmd_search[:]
        cmd.append(path)
        output = __salt__['cmd.run_stdout'](cmd,
                                            output_loglevel='trace',
                                            python_shell=False)
        if output:
            # Keep only the package name from '<pkgname> - <version>'.
            ret[path] = output.split(' - ')[0].strip()
        else:
            ret[path] = ''
    if len(ret) == 1:
        # Single path queried: return the bare package-name string.
        return next(six.itervalues(ret))
    return ret
Return the name of the package that owns the file. Multiple file paths can be passed. Like :mod:`pkg.version <salt.modules.opkg.version>`, if a single path is passed, a string will be returned, and if multiple paths are passed, a dictionary of file/package name pairs will be returned. If the file is not owned by a package, or is not present on the minion, then an empty string will be returned for that path. CLI Example: salt '*' pkg.owner /usr/bin/apachectl salt '*' pkg.owner /usr/bin/apachectl /usr/bin/basename
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/opkg.py#L1599-L1630
[ "def itervalues(d, **kw):\n return d.itervalues(**kw)\n" ]
# -*- coding: utf-8 -*- ''' Support for Opkg .. important:: If you feel that Salt should be using this module to manage packages on a minion, and it is using a different module (or gives an error similar to *'pkg.install' is not available*), see :ref:`here <module-provider-override>`. .. versionadded: 2016.3.0 .. note:: For version comparison support on opkg < 0.3.4, the ``opkg-utils`` package must be installed. ''' # Import python libs from __future__ import absolute_import, print_function, unicode_literals import copy import os import re import logging import errno # Import salt libs import salt.utils.args import salt.utils.data import salt.utils.files import salt.utils.itertools import salt.utils.path import salt.utils.pkg import salt.utils.stringutils import salt.utils.versions from salt.exceptions import ( CommandExecutionError, MinionError, SaltInvocationError ) # Import 3rd-party libs from salt.ext import six from salt.ext.six.moves import shlex_quote as _cmd_quote # pylint: disable=import-error from salt.ext.six.moves import map # pylint: disable=import-error,redefined-builtin REPO_REGEXP = r'^#?\s*(src|src/gz)\s+([^\s<>]+|"[^<>]+")\s+[^\s<>]+' OPKG_CONFDIR = '/etc/opkg' ATTR_MAP = { 'Architecture': 'arch', 'Homepage': 'url', 'Installed-Time': 'install_date_time_t', 'Maintainer': 'packager', 'Package': 'name', 'Section': 'group' } log = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'pkg' NILRT_RESTARTCHECK_STATE_PATH = '/var/lib/salt/restartcheck_state' def _update_nilrt_restart_state(): ''' NILRT systems determine whether to reboot after various package operations including but not limited to kernel module installs/removals by checking specific file md5sums & timestamps. These files are touched/modified by the post-install/post-remove functions of their respective packages. The opkg module uses this function to store/update those file timestamps and checksums to be used later by the restartcheck module. 
''' __salt__['cmd.shell']('stat -c %Y /lib/modules/$(uname -r)/modules.dep >{0}/modules.dep.timestamp' .format(NILRT_RESTARTCHECK_STATE_PATH)) __salt__['cmd.shell']('md5sum /lib/modules/$(uname -r)/modules.dep >{0}/modules.dep.md5sum' .format(NILRT_RESTARTCHECK_STATE_PATH)) # We can't assume nisysapi.ini always exists like modules.dep nisysapi_path = '/usr/local/natinst/share/nisysapi.ini' if os.path.exists(nisysapi_path): __salt__['cmd.shell']('stat -c %Y {0} >{1}/nisysapi.ini.timestamp' .format(nisysapi_path, NILRT_RESTARTCHECK_STATE_PATH)) __salt__['cmd.shell']('md5sum {0} >{1}/nisysapi.ini.md5sum' .format(nisysapi_path, NILRT_RESTARTCHECK_STATE_PATH)) # Expert plugin files get added to a conf.d dir, so keep track of the total # no. of files, their timestamps and content hashes nisysapi_conf_d_path = "/usr/lib/{0}/nisysapi/conf.d/experts/".format( 'arm-linux-gnueabi' if 'arm' in __grains__.get('cpuarch') else 'x86_64-linux-gnu' ) if os.path.exists(nisysapi_conf_d_path): with salt.utils.files.fopen('{0}/sysapi.conf.d.count'.format( NILRT_RESTARTCHECK_STATE_PATH), 'w') as fcount: fcount.write(str(len(os.listdir(nisysapi_conf_d_path)))) for fexpert in os.listdir(nisysapi_conf_d_path): __salt__['cmd.shell']('stat -c %Y {0}/{1} >{2}/{1}.timestamp' .format(nisysapi_conf_d_path, fexpert, NILRT_RESTARTCHECK_STATE_PATH)) __salt__['cmd.shell']('md5sum {0}/{1} >{2}/{1}.md5sum' .format(nisysapi_conf_d_path, fexpert, NILRT_RESTARTCHECK_STATE_PATH)) def _get_restartcheck_result(errors): ''' Return restartcheck result and append errors (if any) to ``errors`` ''' rs_result = __salt__['restartcheck.restartcheck'](verbose=False) if isinstance(rs_result, dict) and 'comment' in rs_result: errors.append(rs_result['comment']) return rs_result def _process_restartcheck_result(rs_result, **kwargs): ''' Check restartcheck output to see if system/service restarts were requested and take appropriate action. 
''' if 'No packages seem to need to be restarted' in rs_result: return reboot_required = False for rstr in rs_result: if 'System restart required' in rstr: _update_nilrt_restart_state() __salt__['system.set_reboot_required_witnessed']() reboot_required = True if kwargs.get('always_restart_services', True) or not reboot_required: for rstr in rs_result: if 'System restart required' not in rstr: service = os.path.join('/etc/init.d', rstr) if os.path.exists(service): __salt__['cmd.run']([service, 'restart']) def __virtual__(): ''' Confirm this module is on a nilrt based system ''' if __grains__.get('os_family') == 'NILinuxRT': try: os.makedirs(NILRT_RESTARTCHECK_STATE_PATH) except OSError as exc: if exc.errno != errno.EEXIST: return False, 'Error creating {0} (-{1}): {2}'.format( NILRT_RESTARTCHECK_STATE_PATH, exc.errno, exc.strerror) # populate state dir if empty if not os.listdir(NILRT_RESTARTCHECK_STATE_PATH): _update_nilrt_restart_state() return __virtualname__ if os.path.isdir(OPKG_CONFDIR): return __virtualname__ return False, "Module opkg only works on OpenEmbedded based systems" def latest_version(*names, **kwargs): ''' Return the latest version of the named package available for upgrade or installation. If more than one package name is specified, a dict of name/version pairs is returned. If the latest version of a given package is already installed, an empty string will be returned for that package. CLI Example: .. code-block:: bash salt '*' pkg.latest_version <package name> salt '*' pkg.latest_version <package name> salt '*' pkg.latest_version <package1> <package2> <package3> ... 
''' refresh = salt.utils.data.is_true(kwargs.pop('refresh', True)) if not names: return '' ret = {} for name in names: ret[name] = '' # Refresh before looking for the latest version available if refresh: refresh_db() cmd = ['opkg', 'list-upgradable'] out = __salt__['cmd.run_stdout'](cmd, output_loglevel='trace', python_shell=False) for line in salt.utils.itertools.split(out, '\n'): try: name, _oldversion, newversion = line.split(' - ') if name in names: ret[name] = newversion except ValueError: pass # Return a string if only one package name passed if len(names) == 1: return ret[names[0]] return ret # available_version is being deprecated available_version = latest_version def version(*names, **kwargs): ''' Returns a string representing the package version or an empty string if not installed. If more than one package name is specified, a dict of name/version pairs is returned. CLI Example: .. code-block:: bash salt '*' pkg.version <package name> salt '*' pkg.version <package1> <package2> <package3> ... ''' return __salt__['pkg_resource.version'](*names, **kwargs) def refresh_db(failhard=False, **kwargs): # pylint: disable=unused-argument ''' Updates the opkg database to latest packages based upon repositories Returns a dict, with the keys being package databases and the values being the result of the update attempt. Values can be one of the following: - ``True``: Database updated successfully - ``False``: Problem updating database failhard If False, return results of failed lines as ``False`` for the package database that encountered the error. If True, raise an error with a list of the package databases that encountered errors. .. versionadded:: 2018.3.0 CLI Example: .. 
code-block:: bash salt '*' pkg.refresh_db ''' # Remove rtag file to keep multiple refreshes from happening in pkg states salt.utils.pkg.clear_rtag(__opts__) ret = {} error_repos = [] cmd = ['opkg', 'update'] # opkg returns a non-zero retcode when there is a failure to refresh # from one or more repos. Due to this, ignore the retcode. call = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False, ignore_retcode=True, redirect_stderr=True) out = call['stdout'] prev_line = '' for line in salt.utils.itertools.split(out, '\n'): if 'Inflating' in line: key = line.strip().split()[1][:-1] ret[key] = True elif 'Updated source' in line: # Use the previous line. key = prev_line.strip().split()[1][:-1] ret[key] = True elif 'Failed to download' in line: key = line.strip().split()[5].split(',')[0] ret[key] = False error_repos.append(key) prev_line = line if failhard and error_repos: raise CommandExecutionError( 'Error getting repos: {0}'.format(', '.join(error_repos)) ) # On a non-zero exit code where no failed repos were found, raise an # exception because this appears to be a different kind of error. if call['retcode'] != 0 and not error_repos: raise CommandExecutionError(out) return ret def _is_testmode(**kwargs): ''' Returns whether a test mode (noaction) operation was requested. ''' return bool(kwargs.get('test') or __opts__.get('test')) def _append_noaction_if_testmode(cmd, **kwargs): ''' Adds the --noaction flag to the command if it's running in the test mode. ''' if _is_testmode(**kwargs): cmd.append('--noaction') def _build_install_command_list(cmd_prefix, to_install, to_downgrade, to_reinstall): ''' Builds a list of install commands to be executed in sequence in order to process each of the to_install, to_downgrade, and to_reinstall lists. 
''' cmds = [] if to_install: cmd = copy.deepcopy(cmd_prefix) cmd.extend(to_install) cmds.append(cmd) if to_downgrade: cmd = copy.deepcopy(cmd_prefix) cmd.append('--force-downgrade') cmd.extend(to_downgrade) cmds.append(cmd) if to_reinstall: cmd = copy.deepcopy(cmd_prefix) cmd.append('--force-reinstall') cmd.extend(to_reinstall) cmds.append(cmd) return cmds def _parse_reported_packages_from_install_output(output): ''' Parses the output of "opkg install" to determine what packages would have been installed by an operation run with the --noaction flag. We are looking for lines like: Installing <package> (<version>) on <target> or Upgrading <package> from <oldVersion> to <version> on root ''' reported_pkgs = {} install_pattern = re.compile(r'Installing\s(?P<package>.*?)\s\((?P<version>.*?)\)\son\s(?P<target>.*?)') upgrade_pattern = re.compile(r'Upgrading\s(?P<package>.*?)\sfrom\s(?P<oldVersion>.*?)\sto\s(?P<version>.*?)\son\s(?P<target>.*?)') for line in salt.utils.itertools.split(output, '\n'): match = install_pattern.match(line) if match is None: match = upgrade_pattern.match(line) if match: reported_pkgs[match.group('package')] = match.group('version') return reported_pkgs def _execute_install_command(cmd, parse_output, errors, parsed_packages): ''' Executes a command for the install operation. If the command fails, its error output will be appended to the errors list. If the command succeeds and parse_output is true, updated packages will be appended to the parsed_packages dictionary. ''' out = __salt__['cmd.run_all']( cmd, output_loglevel='trace', python_shell=False ) if out['retcode'] != 0: if out['stderr']: errors.append(out['stderr']) else: errors.append(out['stdout']) elif parse_output: parsed_packages.update(_parse_reported_packages_from_install_output(out['stdout'])) def install(name=None, refresh=False, pkgs=None, sources=None, reinstall=False, **kwargs): ''' Install the passed package, add refresh=True to update the opkg database. 
name The name of the package to be installed. Note that this parameter is ignored if either "pkgs" or "sources" is passed. Additionally, please note that this option can only be used to install packages from a software repository. To install a package file manually, use the "sources" option. CLI Example: .. code-block:: bash salt '*' pkg.install <package name> refresh Whether or not to refresh the package database before installing. version Install a specific version of the package, e.g. 1.2.3~0ubuntu0. Ignored if "pkgs" or "sources" is passed. .. versionadded:: 2017.7.0 reinstall : False Specifying reinstall=True will use ``opkg install --force-reinstall`` rather than simply ``opkg install`` for requested packages that are already installed. If a version is specified with the requested package, then ``opkg install --force-reinstall`` will only be used if the installed version matches the requested version. .. versionadded:: 2017.7.0 Multiple Package Installation Options: pkgs A list of packages to install from a software repository. Must be passed as a python list. CLI Example: .. code-block:: bash salt '*' pkg.install pkgs='["foo", "bar"]' salt '*' pkg.install pkgs='["foo", {"bar": "1.2.3-0ubuntu0"}]' sources A list of IPK packages to install. Must be passed as a list of dicts, with the keys being package names, and the values being the source URI or local path to the package. Dependencies are automatically resolved and marked as auto-installed. CLI Example: .. code-block:: bash salt '*' pkg.install sources='[{"foo": "salt://foo.deb"},{"bar": "salt://bar.deb"}]' install_recommends Whether to install the packages marked as recommended. Default is True. only_upgrade Only upgrade the packages (disallow downgrades), if they are already installed. Default is False. .. versionadded:: 2017.7.0 always_restart_services Whether to restart services even if a reboot is required. Default is True. 
Returns a dict containing the new package names and versions:: {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} ''' refreshdb = salt.utils.data.is_true(refresh) try: pkg_params, pkg_type = __salt__['pkg_resource.parse_targets']( name, pkgs, sources, **kwargs ) except MinionError as exc: raise CommandExecutionError(exc) old = list_pkgs() cmd_prefix = ['opkg', 'install'] to_install = [] to_reinstall = [] to_downgrade = [] _append_noaction_if_testmode(cmd_prefix, **kwargs) if not pkg_params: return {} elif pkg_type == 'file': if reinstall: cmd_prefix.append('--force-reinstall') if not kwargs.get('only_upgrade', False): cmd_prefix.append('--force-downgrade') to_install.extend(pkg_params) elif pkg_type == 'repository': if not kwargs.get('install_recommends', True): cmd_prefix.append('--no-install-recommends') for pkgname, pkgversion in six.iteritems(pkg_params): if (name and pkgs is None and kwargs.get('version') and len(pkg_params) == 1): # Only use the 'version' param if 'name' was not specified as a # comma-separated list version_num = kwargs['version'] else: version_num = pkgversion if version_num is None: # Don't allow downgrades if the version # number is not specified. if reinstall and pkgname in old: to_reinstall.append(pkgname) else: to_install.append(pkgname) else: pkgstr = '{0}={1}'.format(pkgname, version_num) cver = old.get(pkgname, '') if reinstall and cver and salt.utils.versions.compare( ver1=version_num, oper='==', ver2=cver, cmp_func=version_cmp): to_reinstall.append(pkgstr) elif not cver or salt.utils.versions.compare( ver1=version_num, oper='>=', ver2=cver, cmp_func=version_cmp): to_install.append(pkgstr) else: if not kwargs.get('only_upgrade', False): to_downgrade.append(pkgstr) else: # This should cause the command to fail. 
to_install.append(pkgstr) cmds = _build_install_command_list(cmd_prefix, to_install, to_downgrade, to_reinstall) if not cmds: return {} if refreshdb: refresh_db() errors = [] is_testmode = _is_testmode(**kwargs) test_packages = {} for cmd in cmds: _execute_install_command(cmd, is_testmode, errors, test_packages) __context__.pop('pkg.list_pkgs', None) new = list_pkgs() if is_testmode: new = copy.deepcopy(new) new.update(test_packages) ret = salt.utils.data.compare_dicts(old, new) if pkg_type == 'file' and reinstall: # For file-based packages, prepare 'to_reinstall' to have a list # of all the package names that may have been reinstalled. # This way, we could include reinstalled packages in 'ret'. for pkgfile in to_install: # Convert from file name to package name. cmd = ['opkg', 'info', pkgfile] out = __salt__['cmd.run_all']( cmd, output_loglevel='trace', python_shell=False ) if out['retcode'] == 0: # Just need the package name. pkginfo_dict = _process_info_installed_output( out['stdout'], [] ) if pkginfo_dict: to_reinstall.append(list(pkginfo_dict.keys())[0]) for pkgname in to_reinstall: if pkgname not in ret or pkgname in old: ret.update({pkgname: {'old': old.get(pkgname, ''), 'new': new.get(pkgname, '')}}) rs_result = _get_restartcheck_result(errors) if errors: raise CommandExecutionError( 'Problem encountered installing package(s)', info={'errors': errors, 'changes': ret} ) _process_restartcheck_result(rs_result, **kwargs) return ret def _parse_reported_packages_from_remove_output(output): ''' Parses the output of "opkg remove" to determine what packages would have been removed by an operation run with the --noaction flag. We are looking for lines like Removing <package> (<version>) from <Target>... 
''' reported_pkgs = {} remove_pattern = re.compile(r'Removing\s(?P<package>.*?)\s\((?P<version>.*?)\)\sfrom\s(?P<target>.*?)...') for line in salt.utils.itertools.split(output, '\n'): match = remove_pattern.match(line) if match: reported_pkgs[match.group('package')] = '' return reported_pkgs def remove(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument ''' Remove packages using ``opkg remove``. name The name of the package to be deleted. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. remove_dependencies Remove package and all dependencies .. versionadded:: 2019.2.0 auto_remove_deps Remove packages that were installed automatically to satisfy dependencies .. versionadded:: 2019.2.0 Returns a dict containing the changes. CLI Example: .. code-block:: bash salt '*' pkg.remove <package name> salt '*' pkg.remove <package1>,<package2>,<package3> salt '*' pkg.remove pkgs='["foo", "bar"]' salt '*' pkg.remove pkgs='["foo", "bar"]' remove_dependencies=True auto_remove_deps=True ''' try: pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs)[0] except MinionError as exc: raise CommandExecutionError(exc) old = list_pkgs() targets = [x for x in pkg_params if x in old] if not targets: return {} cmd = ['opkg', 'remove'] _append_noaction_if_testmode(cmd, **kwargs) if kwargs.get('remove_dependencies', False): cmd.append('--force-removal-of-dependent-packages') if kwargs.get('auto_remove_deps', False): cmd.append('--autoremove') cmd.extend(targets) out = __salt__['cmd.run_all']( cmd, output_loglevel='trace', python_shell=False ) if out['retcode'] != 0: if out['stderr']: errors = [out['stderr']] else: errors = [out['stdout']] else: errors = [] __context__.pop('pkg.list_pkgs', None) new = list_pkgs() if _is_testmode(**kwargs): reportedPkgs = _parse_reported_packages_from_remove_output(out['stdout']) new = {k: v for k, v in new.items() if k not in 
reportedPkgs} ret = salt.utils.data.compare_dicts(old, new) rs_result = _get_restartcheck_result(errors) if errors: raise CommandExecutionError( 'Problem encountered removing package(s)', info={'errors': errors, 'changes': ret} ) _process_restartcheck_result(rs_result, **kwargs) return ret def purge(name=None, pkgs=None, **kwargs): # pylint: disable=unused-argument ''' Package purges are not supported by opkg, this function is identical to :mod:`pkg.remove <salt.modules.opkg.remove>`. name The name of the package to be deleted. Multiple Package Options: pkgs A list of packages to delete. Must be passed as a python list. The ``name`` parameter will be ignored if this option is passed. Returns a dict containing the changes. CLI Example: .. code-block:: bash salt '*' pkg.purge <package name> salt '*' pkg.purge <package1>,<package2>,<package3> salt '*' pkg.purge pkgs='["foo", "bar"]' ''' return remove(name=name, pkgs=pkgs) def upgrade(refresh=True, **kwargs): # pylint: disable=unused-argument ''' Upgrades all packages via ``opkg upgrade`` Returns a dictionary containing the changes: .. code-block:: python {'<package>': {'old': '<old-version>', 'new': '<new-version>'}} CLI Example: .. 
code-block:: bash salt '*' pkg.upgrade ''' ret = {'changes': {}, 'result': True, 'comment': '', } errors = [] if salt.utils.data.is_true(refresh): refresh_db() old = list_pkgs() cmd = ['opkg', 'upgrade'] result = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) __context__.pop('pkg.list_pkgs', None) new = list_pkgs() ret = salt.utils.data.compare_dicts(old, new) if result['retcode'] != 0: errors.append(result) rs_result = _get_restartcheck_result(errors) if errors: raise CommandExecutionError( 'Problem encountered upgrading packages', info={'errors': errors, 'changes': ret} ) _process_restartcheck_result(rs_result, **kwargs) return ret def hold(name=None, pkgs=None, sources=None, **kwargs): # pylint: disable=W0613 ''' Set package in 'hold' state, meaning it will not be upgraded. name The name of the package, e.g., 'tmux' CLI Example: .. code-block:: bash salt '*' pkg.hold <package name> pkgs A list of packages to hold. Must be passed as a python list. CLI Example: .. code-block:: bash salt '*' pkg.hold pkgs='["foo", "bar"]' ''' if not name and not pkgs and not sources: raise SaltInvocationError( 'One of name, pkgs, or sources must be specified.' ) if pkgs and sources: raise SaltInvocationError( 'Only one of pkgs or sources can be specified.' ) targets = [] if pkgs: targets.extend(pkgs) elif sources: for source in sources: targets.append(next(iter(source))) else: targets.append(name) ret = {} for target in targets: if isinstance(target, dict): target = next(iter(target)) ret[target] = {'name': target, 'changes': {}, 'result': False, 'comment': ''} state = _get_state(target) if not state: ret[target]['comment'] = ('Package {0} not currently held.' .format(target)) elif state != 'hold': if 'test' in __opts__ and __opts__['test']: ret[target].update(result=None) ret[target]['comment'] = ('Package {0} is set to be held.' 
.format(target)) else: result = _set_state(target, 'hold') ret[target].update(changes=result[target], result=True) ret[target]['comment'] = ('Package {0} is now being held.' .format(target)) else: ret[target].update(result=True) ret[target]['comment'] = ('Package {0} is already set to be held.' .format(target)) return ret def unhold(name=None, pkgs=None, sources=None, **kwargs): # pylint: disable=W0613 ''' Set package current in 'hold' state to install state, meaning it will be upgraded. name The name of the package, e.g., 'tmux' CLI Example: .. code-block:: bash salt '*' pkg.unhold <package name> pkgs A list of packages to hold. Must be passed as a python list. CLI Example: .. code-block:: bash salt '*' pkg.unhold pkgs='["foo", "bar"]' ''' if not name and not pkgs and not sources: raise SaltInvocationError( 'One of name, pkgs, or sources must be specified.' ) if pkgs and sources: raise SaltInvocationError( 'Only one of pkgs or sources can be specified.' ) targets = [] if pkgs: targets.extend(pkgs) elif sources: for source in sources: targets.append(next(iter(source))) else: targets.append(name) ret = {} for target in targets: if isinstance(target, dict): target = next(iter(target)) ret[target] = {'name': target, 'changes': {}, 'result': False, 'comment': ''} state = _get_state(target) if not state: ret[target]['comment'] = ('Package {0} does not have a state.' .format(target)) elif state == 'hold': if 'test' in __opts__ and __opts__['test']: ret[target].update(result=None) ret['comment'] = ('Package {0} is set not to be held.' 
.format(target)) else: result = _set_state(target, 'ok') ret[target].update(changes=result[target], result=True) ret[target]['comment'] = ('Package {0} is no longer being ' 'held.'.format(target)) else: ret[target].update(result=True) ret[target]['comment'] = ('Package {0} is already set not to be ' 'held.'.format(target)) return ret def _get_state(pkg): ''' View package state from the opkg database Return the state of pkg ''' cmd = ['opkg', 'status'] cmd.append(pkg) out = __salt__['cmd.run'](cmd, python_shell=False) state_flag = '' for line in salt.utils.itertools.split(out, '\n'): if line.startswith('Status'): _status, _state_want, state_flag, _state_status = line.split() return state_flag def _set_state(pkg, state): ''' Change package state on the opkg database The state can be any of: - hold - noprune - user - ok - installed - unpacked This command is commonly used to mark a specific package to be held from being upgraded, that is, to be kept at a certain version. Returns a dict containing the package name, and the new and old versions. ''' ret = {} valid_states = ('hold', 'noprune', 'user', 'ok', 'installed', 'unpacked') if state not in valid_states: raise SaltInvocationError('Invalid state: {0}'.format(state)) oldstate = _get_state(pkg) cmd = ['opkg', 'flag'] cmd.append(state) cmd.append(pkg) _out = __salt__['cmd.run'](cmd, python_shell=False) # Missing return value check due to opkg issue 160 ret[pkg] = {'old': oldstate, 'new': state} return ret def list_pkgs(versions_as_list=False, **kwargs): ''' List the packages currently installed in a dict:: {'<package_name>': '<version>'} CLI Example: .. 
code-block:: bash salt '*' pkg.list_pkgs salt '*' pkg.list_pkgs versions_as_list=True ''' versions_as_list = salt.utils.data.is_true(versions_as_list) # not yet implemented or not applicable if any([salt.utils.data.is_true(kwargs.get(x)) for x in ('removed', 'purge_desired')]): return {} if 'pkg.list_pkgs' in __context__: if versions_as_list: return __context__['pkg.list_pkgs'] else: ret = copy.deepcopy(__context__['pkg.list_pkgs']) __salt__['pkg_resource.stringify'](ret) return ret cmd = ['opkg', 'list-installed'] ret = {} out = __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False) for line in salt.utils.itertools.split(out, '\n'): # This is a continuation of package description if not line or line[0] == ' ': continue # This contains package name, version, and description. # Extract the first two. pkg_name, pkg_version = line.split(' - ', 2)[:2] __salt__['pkg_resource.add_pkg'](ret, pkg_name, pkg_version) __salt__['pkg_resource.sort_pkglist'](ret) __context__['pkg.list_pkgs'] = copy.deepcopy(ret) if not versions_as_list: __salt__['pkg_resource.stringify'](ret) return ret def list_upgrades(refresh=True, **kwargs): # pylint: disable=unused-argument ''' List all available package upgrades. CLI Example: .. code-block:: bash salt '*' pkg.list_upgrades ''' ret = {} if salt.utils.data.is_true(refresh): refresh_db() cmd = ['opkg', 'list-upgradable'] call = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) if call['retcode'] != 0: comment = '' if 'stderr' in call: comment += call['stderr'] if 'stdout' in call: comment += call['stdout'] raise CommandExecutionError(comment) else: out = call['stdout'] for line in out.splitlines(): name, _oldversion, newversion = line.split(' - ') ret[name] = newversion return ret def _convert_to_standard_attr(attr): ''' Helper function for _process_info_installed_output() Converts an opkg attribute name to a standard attribute name which is used across 'pkg' modules. 
''' ret_attr = ATTR_MAP.get(attr, None) if ret_attr is None: # All others convert to lowercase return attr.lower() return ret_attr def _process_info_installed_output(out, filter_attrs): ''' Helper function for info_installed() Processes stdout output from a single invocation of 'opkg status'. ''' ret = {} name = None attrs = {} attr = None for line in salt.utils.itertools.split(out, '\n'): if line and line[0] == ' ': # This is a continuation of the last attr if filter_attrs is None or attr in filter_attrs: line = line.strip() if attrs[attr]: # If attr is empty, don't add leading newline attrs[attr] += '\n' attrs[attr] += line continue line = line.strip() if not line: # Separator between different packages if name: ret[name] = attrs name = None attrs = {} attr = None continue key, value = line.split(':', 1) value = value.lstrip() attr = _convert_to_standard_attr(key) if attr == 'name': name = value elif filter_attrs is None or attr in filter_attrs: attrs[attr] = value if name: ret[name] = attrs return ret def info_installed(*names, **kwargs): ''' Return the information of the named package(s), installed on the system. .. versionadded:: 2017.7.0 :param names: Names of the packages to get information about. If none are specified, will return information for all installed packages. :param attr: Comma-separated package attributes. If no 'attr' is specified, all available attributes returned. Valid attributes are: arch, conffiles, conflicts, depends, description, filename, group, install_date_time_t, md5sum, packager, provides, recommends, replaces, size, source, suggests, url, version CLI example: .. code-block:: bash salt '*' pkg.info_installed salt '*' pkg.info_installed attr=version,packager salt '*' pkg.info_installed <package1> salt '*' pkg.info_installed <package1> <package2> <package3> ... salt '*' pkg.info_installed <package1> attr=version,packager salt '*' pkg.info_installed <package1> <package2> <package3> ... 
attr=version,packager ''' attr = kwargs.pop('attr', None) if attr is None: filter_attrs = None elif isinstance(attr, six.string_types): filter_attrs = set(attr.split(',')) else: filter_attrs = set(attr) ret = {} if names: # Specific list of names of installed packages for name in names: cmd = ['opkg', 'status', name] call = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) if call['retcode'] != 0: comment = '' if call['stderr']: comment += call['stderr'] else: comment += call['stdout'] raise CommandExecutionError(comment) ret.update(_process_info_installed_output(call['stdout'], filter_attrs)) else: # All installed packages cmd = ['opkg', 'status'] call = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) if call['retcode'] != 0: comment = '' if call['stderr']: comment += call['stderr'] else: comment += call['stdout'] raise CommandExecutionError(comment) ret.update(_process_info_installed_output(call['stdout'], filter_attrs)) return ret def upgrade_available(name, **kwargs): # pylint: disable=unused-argument ''' Check whether or not an upgrade is available for a given package CLI Example: .. code-block:: bash salt '*' pkg.upgrade_available <package name> ''' return latest_version(name) != '' def version_cmp(pkg1, pkg2, ignore_epoch=False, **kwargs): # pylint: disable=unused-argument ''' Do a cmp-style comparison on two packages. Return -1 if pkg1 < pkg2, 0 if pkg1 == pkg2, and 1 if pkg1 > pkg2. Return None if there was a problem making the comparison. ignore_epoch : False Set to ``True`` to ignore the epoch when comparing versions .. versionadded:: 2016.3.4 CLI Example: .. 
code-block:: bash salt '*' pkg.version_cmp '0.2.4-0' '0.2.4.1-0' ''' normalize = lambda x: six.text_type(x).split(':', 1)[-1] if ignore_epoch else six.text_type(x) pkg1 = normalize(pkg1) pkg2 = normalize(pkg2) output = __salt__['cmd.run_stdout'](['opkg', '--version'], output_loglevel='trace', python_shell=False) opkg_version = output.split(' ')[2].strip() if salt.utils.versions.LooseVersion(opkg_version) >= \ salt.utils.versions.LooseVersion('0.3.4'): cmd_compare = ['opkg', 'compare-versions'] elif salt.utils.path.which('opkg-compare-versions'): cmd_compare = ['opkg-compare-versions'] else: log.warning('Unable to find a compare-versions utility installed. Either upgrade opkg to ' 'version > 0.3.4 (preferred) or install the older opkg-compare-versions script.') return None for oper, ret in (("<<", -1), ("=", 0), (">>", 1)): cmd = cmd_compare[:] cmd.append(_cmd_quote(pkg1)) cmd.append(oper) cmd.append(_cmd_quote(pkg2)) retcode = __salt__['cmd.retcode'](cmd, output_loglevel='trace', ignore_retcode=True, python_shell=False) if retcode == 0: return ret return None def _set_repo_option(repo, option): ''' Set the option to repo ''' if not option: return opt = option.split('=') if len(opt) != 2: return if opt[0] == 'trusted': repo['trusted'] = opt[1] == 'yes' else: repo[opt[0]] = opt[1] def _set_repo_options(repo, options): ''' Set the options to the repo. 
''' delimiters = "[", "]" pattern = '|'.join(map(re.escape, delimiters)) for option in options: splitted = re.split(pattern, option) for opt in splitted: _set_repo_option(repo, opt) def _create_repo(line, filename): ''' Create repo ''' repo = {} if line.startswith('#'): repo['enabled'] = False line = line[1:] else: repo['enabled'] = True cols = salt.utils.args.shlex_split(line.strip()) repo['compressed'] = not cols[0] in 'src' repo['name'] = cols[1] repo['uri'] = cols[2] repo['file'] = os.path.join(OPKG_CONFDIR, filename) if len(cols) > 3: _set_repo_options(repo, cols[3:]) return repo def _read_repos(conf_file, repos, filename, regex): ''' Read repos from configuration file ''' for line in conf_file: line = salt.utils.stringutils.to_unicode(line) if not regex.search(line): continue repo = _create_repo(line, filename) # do not store duplicated uri's if repo['uri'] not in repos: repos[repo['uri']] = [repo] def list_repos(**kwargs): # pylint: disable=unused-argument ''' Lists all repos on ``/etc/opkg/*.conf`` CLI Example: .. code-block:: bash salt '*' pkg.list_repos ''' repos = {} regex = re.compile(REPO_REGEXP) for filename in os.listdir(OPKG_CONFDIR): if not filename.endswith(".conf"): continue with salt.utils.files.fopen(os.path.join(OPKG_CONFDIR, filename)) as conf_file: _read_repos(conf_file, repos, filename, regex) return repos def get_repo(repo, **kwargs): # pylint: disable=unused-argument ''' Display a repo from the ``/etc/opkg/*.conf`` CLI Examples: .. 
code-block:: bash salt '*' pkg.get_repo repo ''' repos = list_repos() if repos: for source in six.itervalues(repos): for sub in source: if sub['name'] == repo: return sub return {} def _del_repo_from_file(repo, filepath): ''' Remove a repo from filepath ''' with salt.utils.files.fopen(filepath) as fhandle: output = [] regex = re.compile(REPO_REGEXP) for line in fhandle: line = salt.utils.stringutils.to_unicode(line) if regex.search(line): if line.startswith('#'): line = line[1:] cols = salt.utils.args.shlex_split(line.strip()) if repo != cols[1]: output.append(salt.utils.stringutils.to_str(line)) with salt.utils.files.fopen(filepath, 'w') as fhandle: fhandle.writelines(output) def _set_trusted_option_if_needed(repostr, trusted): ''' Set trusted option to repo if needed ''' if trusted is True: repostr += ' [trusted=yes]' elif trusted is False: repostr += ' [trusted=no]' return repostr def _add_new_repo(repo, properties): ''' Add a new repo entry ''' repostr = '# ' if not properties.get('enabled') else '' repostr += 'src/gz ' if properties.get('compressed') else 'src ' if ' ' in repo: repostr += '"' + repo + '" ' else: repostr += repo + ' ' repostr += properties.get('uri') repostr = _set_trusted_option_if_needed(repostr, properties.get('trusted')) repostr += '\n' conffile = os.path.join(OPKG_CONFDIR, repo + '.conf') with salt.utils.files.fopen(conffile, 'a') as fhandle: fhandle.write(salt.utils.stringutils.to_str(repostr)) def _mod_repo_in_file(repo, repostr, filepath): ''' Replace a repo entry in filepath with repostr ''' with salt.utils.files.fopen(filepath) as fhandle: output = [] for line in fhandle: cols = salt.utils.args.shlex_split( salt.utils.stringutils.to_unicode(line).strip() ) if repo not in cols: output.append(line) else: output.append(salt.utils.stringutils.to_str(repostr + '\n')) with salt.utils.files.fopen(filepath, 'w') as fhandle: fhandle.writelines(output) def del_repo(repo, **kwargs): # pylint: disable=unused-argument ''' Delete a repo from 
``/etc/opkg/*.conf`` If the file does not contain any other repo configuration, the file itself will be deleted. CLI Examples: .. code-block:: bash salt '*' pkg.del_repo repo ''' refresh = salt.utils.data.is_true(kwargs.get('refresh', True)) repos = list_repos() if repos: deleted_from = dict() for repository in repos: source = repos[repository][0] if source['name'] == repo: deleted_from[source['file']] = 0 _del_repo_from_file(repo, source['file']) if deleted_from: ret = '' for repository in repos: source = repos[repository][0] if source['file'] in deleted_from: deleted_from[source['file']] += 1 for repo_file, count in six.iteritems(deleted_from): msg = 'Repo \'{0}\' has been removed from {1}.\n' if count == 1 and os.path.isfile(repo_file): msg = ('File {1} containing repo \'{0}\' has been ' 'removed.\n') try: os.remove(repo_file) except OSError: pass ret += msg.format(repo, repo_file) if refresh: refresh_db() return ret return "Repo {0} doesn't exist in the opkg repo lists".format(repo) def mod_repo(repo, **kwargs): ''' Modify one or more values for a repo. If the repo does not exist, it will be created, so long as uri is defined. The following options are available to modify a repo definition: repo alias by which opkg refers to the repo. uri the URI to the repo. compressed defines (True or False) if the index file is compressed enabled enable or disable (True or False) repository but do not remove if disabled. refresh enable or disable (True or False) auto-refresh of the repositories CLI Examples: .. 
code-block:: bash salt '*' pkg.mod_repo repo uri=http://new/uri salt '*' pkg.mod_repo repo enabled=False ''' repos = list_repos() found = False uri = '' if 'uri' in kwargs: uri = kwargs['uri'] for repository in repos: source = repos[repository][0] if source['name'] == repo: found = True repostr = '' if 'enabled' in kwargs and not kwargs['enabled']: repostr += '# ' if 'compressed' in kwargs: repostr += 'src/gz ' if kwargs['compressed'] else 'src' else: repostr += 'src/gz' if source['compressed'] else 'src' repo_alias = kwargs['alias'] if 'alias' in kwargs else repo if ' ' in repo_alias: repostr += ' "{0}"'.format(repo_alias) else: repostr += ' {0}'.format(repo_alias) repostr += ' {0}'.format(kwargs['uri'] if 'uri' in kwargs else source['uri']) trusted = kwargs.get('trusted') repostr = _set_trusted_option_if_needed(repostr, trusted) if trusted is not None else \ _set_trusted_option_if_needed(repostr, source.get('trusted')) _mod_repo_in_file(repo, repostr, source['file']) elif uri and source['uri'] == uri: raise CommandExecutionError( 'Repository \'{0}\' already exists as \'{1}\'.'.format(uri, source['name'])) if not found: # Need to add a new repo if 'uri' not in kwargs: raise CommandExecutionError( 'Repository \'{0}\' not found and no URI passed to create one.'.format(repo)) properties = {'uri': kwargs['uri']} # If compressed is not defined, assume True properties['compressed'] = kwargs['compressed'] if 'compressed' in kwargs else True # If enabled is not defined, assume True properties['enabled'] = kwargs['enabled'] if 'enabled' in kwargs else True properties['trusted'] = kwargs.get('trusted') _add_new_repo(repo, properties) if 'refresh' in kwargs: refresh_db() def file_list(*packages, **kwargs): # pylint: disable=unused-argument ''' List the files that belong to a package. Not specifying any packages will return a list of _every_ file on the system's package database (not generally recommended). CLI Examples: .. 
code-block:: bash salt '*' pkg.file_list httpd salt '*' pkg.file_list httpd postfix salt '*' pkg.file_list ''' output = file_dict(*packages) files = [] for package in list(output['packages'].values()): files.extend(package) return {'errors': output['errors'], 'files': files} def file_dict(*packages, **kwargs): # pylint: disable=unused-argument ''' List the files that belong to a package, grouped by package. Not specifying any packages will return a list of _every_ file on the system's package database (not generally recommended). CLI Examples: .. code-block:: bash salt '*' pkg.file_list httpd salt '*' pkg.file_list httpd postfix salt '*' pkg.file_list ''' errors = [] ret = {} cmd_files = ['opkg', 'files'] if not packages: packages = list(list_pkgs().keys()) for package in packages: files = [] cmd = cmd_files[:] cmd.append(package) out = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) for line in out['stdout'].splitlines(): if line.startswith('/'): files.append(line) elif line.startswith(' * '): errors.append(line[3:]) break else: continue if files: ret[package] = files return {'errors': errors, 'packages': ret} def version_clean(version): ''' Clean the version string removing extra data. There's nothing do to here for nipkg.py, therefore it will always return the given version. ''' return version def check_extra_requirements(pkgname, pkgver): ''' Check if the installed package already has the given requirements. There's nothing do to here for nipkg.py, therefore it will always return True. ''' return True
saltstack/salt
salt/modules/splunk.py
_send_email
python
def _send_email(name, email): "send a email to inform user of account creation" config = __salt__['config.option']('splunk') email_object = config.get('email') if email_object: cc = email_object.get('cc') subject = email_object.get('subject') message = email_object.get('message').format(name, name, _generate_password(email), name) try: mail_process = subprocess.Popen(['mail', '-s', subject, '-c', cc, email], stdin=subprocess.PIPE) except Exception as e: log.error("unable to send email to %s: %s", email, e) mail_process.communicate(message) log.info("sent account creation email to %s", email)
send a email to inform user of account creation
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/splunk.py#L84-L100
[ "def _generate_password(email):\n m = hmac.new(base64.b64decode(_get_secret_key('splunk')), six.text_type([email, SERVICE_NAME]))\n return base64.urlsafe_b64encode(m.digest()).strip().replace('=', '')\n" ]
# -*- coding: utf-8 -*- ''' Module for interop with the Splunk API .. versionadded:: 2016.3.0. :depends: - splunk-sdk python module :configuration: Configure this module by specifying the name of a configuration profile in the minion config, minion pillar, or master config. The module will use the 'splunk' key by default, if defined. For example: .. code-block:: yaml splunk: username: alice password: abc123 host: example.splunkcloud.com port: 8080 ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import logging import hmac import base64 import subprocess # Import 3rd-party libs from salt.ext import six HAS_LIBS = False try: import splunklib.client from splunklib.client import AuthenticationError from splunklib.binding import HTTPError HAS_LIBS = True except ImportError: pass log = logging.getLogger(__name__) __virtualname__ = 'splunk' SERVICE_NAME = "splunk" ALLOWED_FIELDS_FOR_MODIFICATION = [ 'realname', 'roles', 'defaultApp', 'tz', #'capabilities', 'name' ] REQUIRED_FIELDS_FOR_CREATE = [ 'realname', 'name', 'roles' ] def __virtual__(): ''' Only load this module if splunk is installed on this minion. 
''' if HAS_LIBS: return __virtualname__ return (False, 'The splunk execution module failed to load: ' 'requires splunk python library to be installed.') def _get_secret_key(profile): config = __salt__['config.option'](profile) return config.get('password_secret_key') def _generate_password(email): m = hmac.new(base64.b64decode(_get_secret_key('splunk')), six.text_type([email, SERVICE_NAME])) return base64.urlsafe_b64encode(m.digest()).strip().replace('=', '') def _populate_cache(profile="splunk"): config = __salt__['config.option'](profile) key = "splunk.users.{0}".format( config.get('host') ) if key not in __context__: client = _get_splunk(profile) kwargs = {'sort_key': 'realname', 'sort_dir': 'asc'} users = client.users.list(count=-1, **kwargs) result = {} for user in users: result[user.email.lower()] = user __context__[key] = result return True def _get_splunk(profile): ''' Return the splunk client, cached into __context__ for performance ''' config = __salt__['config.option'](profile) key = "splunk.{0}:{1}:{2}:{3}".format( config.get('host'), config.get('port'), config.get('username'), config.get('password') ) if key not in __context__: __context__[key] = splunklib.client.connect( host=config.get('host'), port=config.get('port'), username=config.get('username'), password=config.get('password')) return __context__[key] def list_users(profile="splunk"): ''' List all users in the splunk DB CLI Example: salt myminion splunk.list_users ''' config = __salt__['config.option'](profile) key = "splunk.users.{0}".format( config.get('host') ) if key not in __context__: _populate_cache(profile) return __context__[key] def get_user(email, profile="splunk", **kwargs): ''' Get a splunk user by name/email CLI Example: salt myminion splunk.get_user 'user@example.com' user_details=false salt myminion splunk.get_user 'user@example.com' user_details=true ''' user_map = list_users(profile) user_found = email.lower() in user_map.keys() if not kwargs.get('user_details', False) and 
user_found: # The user is in splunk group, just return return True elif kwargs.get('user_details', False) and user_found: user = user_map[email.lower()] response = {} for field in ['defaultApp', 'realname', 'name', 'email']: response[field] = user[field] response['roles'] = [] for role in user.role_entities: response['roles'].append(role.name) return response return False def create_user(email, profile="splunk", **kwargs): ''' create a splunk user by name/email CLI Example: salt myminion splunk.create_user user@example.com roles=['user'] realname="Test User" name=testuser ''' client = _get_splunk(profile) email = email.lower() user = list_users(profile).get(email) if user: log.error("User is already present %s", email) return False property_map = {} for field in ALLOWED_FIELDS_FOR_MODIFICATION: if kwargs.get(field): property_map[field] = kwargs.get(field) try: # create for req_field in REQUIRED_FIELDS_FOR_CREATE: if not property_map.get(req_field): log.error("Missing required params %s", ', '.join([six.text_type(k) for k in REQUIRED_FIELDS_FOR_CREATE])) return False newuser = client.users.create(username=property_map['name'], password=_generate_password(email), roles=property_map['roles'], email=email, realname=property_map['realname']) _send_email(newuser.name, newuser.email) response = {} for field in ['email', 'password', 'realname', 'roles']: response[field] = newuser[field] except Exception as e: log.error("Caught exception %s", e) return False def update_user(email, profile="splunk", **kwargs): ''' Create a splunk user by email CLI Example: salt myminion splunk.update_user example@domain.com roles=['user'] realname="Test User" ''' client = _get_splunk(profile) email = email.lower() user = list_users(profile).get(email) if not user: log.error('Failed to retrieve user %s', email) return False property_map = {} for field in ALLOWED_FIELDS_FOR_MODIFICATION: if kwargs.get(field): property_map[field] = kwargs.get(field) # update kwargs = {} roles = [role.name for 
role in user.role_entities] for k, v in property_map.items(): resource_value = user[k] if resource_value is not None: # you can't update the username in update api call if k.lower() == 'name': continue if k.lower() == 'roles': if isinstance(v, six.string_types): v = v.split(',') if set(roles) != set(v): kwargs['roles'] = list(set(v)) elif resource_value != v: kwargs[k] = v if kwargs: user.update(**kwargs).refresh() fields_modified = {} for field in ALLOWED_FIELDS_FOR_MODIFICATION: fields_modified[field] = user[field] else: #succeeded, no change return True def delete_user(email, profile="splunk"): ''' Delete a splunk user by email CLI Example: salt myminion splunk_user.delete 'user@example.com' ''' client = _get_splunk(profile) user = list_users(profile).get(email) if user: try: client.users.delete(user.name) except (AuthenticationError, HTTPError) as e: log.info('Exception: %s', e) return False else: return False return user.name not in client.users
saltstack/salt
salt/modules/splunk.py
_get_splunk
python
def _get_splunk(profile): ''' Return the splunk client, cached into __context__ for performance ''' config = __salt__['config.option'](profile) key = "splunk.{0}:{1}:{2}:{3}".format( config.get('host'), config.get('port'), config.get('username'), config.get('password') ) if key not in __context__: __context__[key] = splunklib.client.connect( host=config.get('host'), port=config.get('port'), username=config.get('username'), password=config.get('password')) return __context__[key]
Return the splunk client, cached into __context__ for performance
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/splunk.py#L124-L144
null
# -*- coding: utf-8 -*- ''' Module for interop with the Splunk API .. versionadded:: 2016.3.0. :depends: - splunk-sdk python module :configuration: Configure this module by specifying the name of a configuration profile in the minion config, minion pillar, or master config. The module will use the 'splunk' key by default, if defined. For example: .. code-block:: yaml splunk: username: alice password: abc123 host: example.splunkcloud.com port: 8080 ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import logging import hmac import base64 import subprocess # Import 3rd-party libs from salt.ext import six HAS_LIBS = False try: import splunklib.client from splunklib.client import AuthenticationError from splunklib.binding import HTTPError HAS_LIBS = True except ImportError: pass log = logging.getLogger(__name__) __virtualname__ = 'splunk' SERVICE_NAME = "splunk" ALLOWED_FIELDS_FOR_MODIFICATION = [ 'realname', 'roles', 'defaultApp', 'tz', #'capabilities', 'name' ] REQUIRED_FIELDS_FOR_CREATE = [ 'realname', 'name', 'roles' ] def __virtual__(): ''' Only load this module if splunk is installed on this minion. 
''' if HAS_LIBS: return __virtualname__ return (False, 'The splunk execution module failed to load: ' 'requires splunk python library to be installed.') def _get_secret_key(profile): config = __salt__['config.option'](profile) return config.get('password_secret_key') def _generate_password(email): m = hmac.new(base64.b64decode(_get_secret_key('splunk')), six.text_type([email, SERVICE_NAME])) return base64.urlsafe_b64encode(m.digest()).strip().replace('=', '') def _send_email(name, email): "send a email to inform user of account creation" config = __salt__['config.option']('splunk') email_object = config.get('email') if email_object: cc = email_object.get('cc') subject = email_object.get('subject') message = email_object.get('message').format(name, name, _generate_password(email), name) try: mail_process = subprocess.Popen(['mail', '-s', subject, '-c', cc, email], stdin=subprocess.PIPE) except Exception as e: log.error("unable to send email to %s: %s", email, e) mail_process.communicate(message) log.info("sent account creation email to %s", email) def _populate_cache(profile="splunk"): config = __salt__['config.option'](profile) key = "splunk.users.{0}".format( config.get('host') ) if key not in __context__: client = _get_splunk(profile) kwargs = {'sort_key': 'realname', 'sort_dir': 'asc'} users = client.users.list(count=-1, **kwargs) result = {} for user in users: result[user.email.lower()] = user __context__[key] = result return True def list_users(profile="splunk"): ''' List all users in the splunk DB CLI Example: salt myminion splunk.list_users ''' config = __salt__['config.option'](profile) key = "splunk.users.{0}".format( config.get('host') ) if key not in __context__: _populate_cache(profile) return __context__[key] def get_user(email, profile="splunk", **kwargs): ''' Get a splunk user by name/email CLI Example: salt myminion splunk.get_user 'user@example.com' user_details=false salt myminion splunk.get_user 'user@example.com' user_details=true ''' user_map = 
list_users(profile) user_found = email.lower() in user_map.keys() if not kwargs.get('user_details', False) and user_found: # The user is in splunk group, just return return True elif kwargs.get('user_details', False) and user_found: user = user_map[email.lower()] response = {} for field in ['defaultApp', 'realname', 'name', 'email']: response[field] = user[field] response['roles'] = [] for role in user.role_entities: response['roles'].append(role.name) return response return False def create_user(email, profile="splunk", **kwargs): ''' create a splunk user by name/email CLI Example: salt myminion splunk.create_user user@example.com roles=['user'] realname="Test User" name=testuser ''' client = _get_splunk(profile) email = email.lower() user = list_users(profile).get(email) if user: log.error("User is already present %s", email) return False property_map = {} for field in ALLOWED_FIELDS_FOR_MODIFICATION: if kwargs.get(field): property_map[field] = kwargs.get(field) try: # create for req_field in REQUIRED_FIELDS_FOR_CREATE: if not property_map.get(req_field): log.error("Missing required params %s", ', '.join([six.text_type(k) for k in REQUIRED_FIELDS_FOR_CREATE])) return False newuser = client.users.create(username=property_map['name'], password=_generate_password(email), roles=property_map['roles'], email=email, realname=property_map['realname']) _send_email(newuser.name, newuser.email) response = {} for field in ['email', 'password', 'realname', 'roles']: response[field] = newuser[field] except Exception as e: log.error("Caught exception %s", e) return False def update_user(email, profile="splunk", **kwargs): ''' Create a splunk user by email CLI Example: salt myminion splunk.update_user example@domain.com roles=['user'] realname="Test User" ''' client = _get_splunk(profile) email = email.lower() user = list_users(profile).get(email) if not user: log.error('Failed to retrieve user %s', email) return False property_map = {} for field in 
ALLOWED_FIELDS_FOR_MODIFICATION: if kwargs.get(field): property_map[field] = kwargs.get(field) # update kwargs = {} roles = [role.name for role in user.role_entities] for k, v in property_map.items(): resource_value = user[k] if resource_value is not None: # you can't update the username in update api call if k.lower() == 'name': continue if k.lower() == 'roles': if isinstance(v, six.string_types): v = v.split(',') if set(roles) != set(v): kwargs['roles'] = list(set(v)) elif resource_value != v: kwargs[k] = v if kwargs: user.update(**kwargs).refresh() fields_modified = {} for field in ALLOWED_FIELDS_FOR_MODIFICATION: fields_modified[field] = user[field] else: #succeeded, no change return True def delete_user(email, profile="splunk"): ''' Delete a splunk user by email CLI Example: salt myminion splunk_user.delete 'user@example.com' ''' client = _get_splunk(profile) user = list_users(profile).get(email) if user: try: client.users.delete(user.name) except (AuthenticationError, HTTPError) as e: log.info('Exception: %s', e) return False else: return False return user.name not in client.users
saltstack/salt
salt/modules/splunk.py
list_users
python
def list_users(profile="splunk"): ''' List all users in the splunk DB CLI Example: salt myminion splunk.list_users ''' config = __salt__['config.option'](profile) key = "splunk.users.{0}".format( config.get('host') ) if key not in __context__: _populate_cache(profile) return __context__[key]
List all users in the splunk DB CLI Example: salt myminion splunk.list_users
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/splunk.py#L147-L164
[ "def _populate_cache(profile=\"splunk\"):\n config = __salt__['config.option'](profile)\n\n key = \"splunk.users.{0}\".format(\n config.get('host')\n )\n\n if key not in __context__:\n client = _get_splunk(profile)\n kwargs = {'sort_key': 'realname', 'sort_dir': 'asc'}\n users = client.users.list(count=-1, **kwargs)\n\n result = {}\n for user in users:\n result[user.email.lower()] = user\n\n __context__[key] = result\n\n return True\n" ]
# -*- coding: utf-8 -*- ''' Module for interop with the Splunk API .. versionadded:: 2016.3.0. :depends: - splunk-sdk python module :configuration: Configure this module by specifying the name of a configuration profile in the minion config, minion pillar, or master config. The module will use the 'splunk' key by default, if defined. For example: .. code-block:: yaml splunk: username: alice password: abc123 host: example.splunkcloud.com port: 8080 ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import logging import hmac import base64 import subprocess # Import 3rd-party libs from salt.ext import six HAS_LIBS = False try: import splunklib.client from splunklib.client import AuthenticationError from splunklib.binding import HTTPError HAS_LIBS = True except ImportError: pass log = logging.getLogger(__name__) __virtualname__ = 'splunk' SERVICE_NAME = "splunk" ALLOWED_FIELDS_FOR_MODIFICATION = [ 'realname', 'roles', 'defaultApp', 'tz', #'capabilities', 'name' ] REQUIRED_FIELDS_FOR_CREATE = [ 'realname', 'name', 'roles' ] def __virtual__(): ''' Only load this module if splunk is installed on this minion. 
''' if HAS_LIBS: return __virtualname__ return (False, 'The splunk execution module failed to load: ' 'requires splunk python library to be installed.') def _get_secret_key(profile): config = __salt__['config.option'](profile) return config.get('password_secret_key') def _generate_password(email): m = hmac.new(base64.b64decode(_get_secret_key('splunk')), six.text_type([email, SERVICE_NAME])) return base64.urlsafe_b64encode(m.digest()).strip().replace('=', '') def _send_email(name, email): "send a email to inform user of account creation" config = __salt__['config.option']('splunk') email_object = config.get('email') if email_object: cc = email_object.get('cc') subject = email_object.get('subject') message = email_object.get('message').format(name, name, _generate_password(email), name) try: mail_process = subprocess.Popen(['mail', '-s', subject, '-c', cc, email], stdin=subprocess.PIPE) except Exception as e: log.error("unable to send email to %s: %s", email, e) mail_process.communicate(message) log.info("sent account creation email to %s", email) def _populate_cache(profile="splunk"): config = __salt__['config.option'](profile) key = "splunk.users.{0}".format( config.get('host') ) if key not in __context__: client = _get_splunk(profile) kwargs = {'sort_key': 'realname', 'sort_dir': 'asc'} users = client.users.list(count=-1, **kwargs) result = {} for user in users: result[user.email.lower()] = user __context__[key] = result return True def _get_splunk(profile): ''' Return the splunk client, cached into __context__ for performance ''' config = __salt__['config.option'](profile) key = "splunk.{0}:{1}:{2}:{3}".format( config.get('host'), config.get('port'), config.get('username'), config.get('password') ) if key not in __context__: __context__[key] = splunklib.client.connect( host=config.get('host'), port=config.get('port'), username=config.get('username'), password=config.get('password')) return __context__[key] def get_user(email, profile="splunk", **kwargs): ''' Get 
a splunk user by name/email CLI Example: salt myminion splunk.get_user 'user@example.com' user_details=false salt myminion splunk.get_user 'user@example.com' user_details=true ''' user_map = list_users(profile) user_found = email.lower() in user_map.keys() if not kwargs.get('user_details', False) and user_found: # The user is in splunk group, just return return True elif kwargs.get('user_details', False) and user_found: user = user_map[email.lower()] response = {} for field in ['defaultApp', 'realname', 'name', 'email']: response[field] = user[field] response['roles'] = [] for role in user.role_entities: response['roles'].append(role.name) return response return False def create_user(email, profile="splunk", **kwargs): ''' create a splunk user by name/email CLI Example: salt myminion splunk.create_user user@example.com roles=['user'] realname="Test User" name=testuser ''' client = _get_splunk(profile) email = email.lower() user = list_users(profile).get(email) if user: log.error("User is already present %s", email) return False property_map = {} for field in ALLOWED_FIELDS_FOR_MODIFICATION: if kwargs.get(field): property_map[field] = kwargs.get(field) try: # create for req_field in REQUIRED_FIELDS_FOR_CREATE: if not property_map.get(req_field): log.error("Missing required params %s", ', '.join([six.text_type(k) for k in REQUIRED_FIELDS_FOR_CREATE])) return False newuser = client.users.create(username=property_map['name'], password=_generate_password(email), roles=property_map['roles'], email=email, realname=property_map['realname']) _send_email(newuser.name, newuser.email) response = {} for field in ['email', 'password', 'realname', 'roles']: response[field] = newuser[field] except Exception as e: log.error("Caught exception %s", e) return False def update_user(email, profile="splunk", **kwargs): ''' Create a splunk user by email CLI Example: salt myminion splunk.update_user example@domain.com roles=['user'] realname="Test User" ''' client = _get_splunk(profile) 
email = email.lower() user = list_users(profile).get(email) if not user: log.error('Failed to retrieve user %s', email) return False property_map = {} for field in ALLOWED_FIELDS_FOR_MODIFICATION: if kwargs.get(field): property_map[field] = kwargs.get(field) # update kwargs = {} roles = [role.name for role in user.role_entities] for k, v in property_map.items(): resource_value = user[k] if resource_value is not None: # you can't update the username in update api call if k.lower() == 'name': continue if k.lower() == 'roles': if isinstance(v, six.string_types): v = v.split(',') if set(roles) != set(v): kwargs['roles'] = list(set(v)) elif resource_value != v: kwargs[k] = v if kwargs: user.update(**kwargs).refresh() fields_modified = {} for field in ALLOWED_FIELDS_FOR_MODIFICATION: fields_modified[field] = user[field] else: #succeeded, no change return True def delete_user(email, profile="splunk"): ''' Delete a splunk user by email CLI Example: salt myminion splunk_user.delete 'user@example.com' ''' client = _get_splunk(profile) user = list_users(profile).get(email) if user: try: client.users.delete(user.name) except (AuthenticationError, HTTPError) as e: log.info('Exception: %s', e) return False else: return False return user.name not in client.users
saltstack/salt
salt/modules/splunk.py
get_user
python
def get_user(email, profile="splunk", **kwargs): ''' Get a splunk user by name/email CLI Example: salt myminion splunk.get_user 'user@example.com' user_details=false salt myminion splunk.get_user 'user@example.com' user_details=true ''' user_map = list_users(profile) user_found = email.lower() in user_map.keys() if not kwargs.get('user_details', False) and user_found: # The user is in splunk group, just return return True elif kwargs.get('user_details', False) and user_found: user = user_map[email.lower()] response = {} for field in ['defaultApp', 'realname', 'name', 'email']: response[field] = user[field] response['roles'] = [] for role in user.role_entities: response['roles'].append(role.name) return response return False
Get a splunk user by name/email CLI Example: salt myminion splunk.get_user 'user@example.com' user_details=false salt myminion splunk.get_user 'user@example.com' user_details=true
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/splunk.py#L167-L196
[ "def list_users(profile=\"splunk\"):\n '''\n List all users in the splunk DB\n\n CLI Example:\n\n salt myminion splunk.list_users\n '''\n\n config = __salt__['config.option'](profile)\n key = \"splunk.users.{0}\".format(\n config.get('host')\n )\n\n if key not in __context__:\n _populate_cache(profile)\n\n return __context__[key]\n" ]
# -*- coding: utf-8 -*- ''' Module for interop with the Splunk API .. versionadded:: 2016.3.0. :depends: - splunk-sdk python module :configuration: Configure this module by specifying the name of a configuration profile in the minion config, minion pillar, or master config. The module will use the 'splunk' key by default, if defined. For example: .. code-block:: yaml splunk: username: alice password: abc123 host: example.splunkcloud.com port: 8080 ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import logging import hmac import base64 import subprocess # Import 3rd-party libs from salt.ext import six HAS_LIBS = False try: import splunklib.client from splunklib.client import AuthenticationError from splunklib.binding import HTTPError HAS_LIBS = True except ImportError: pass log = logging.getLogger(__name__) __virtualname__ = 'splunk' SERVICE_NAME = "splunk" ALLOWED_FIELDS_FOR_MODIFICATION = [ 'realname', 'roles', 'defaultApp', 'tz', #'capabilities', 'name' ] REQUIRED_FIELDS_FOR_CREATE = [ 'realname', 'name', 'roles' ] def __virtual__(): ''' Only load this module if splunk is installed on this minion. 
''' if HAS_LIBS: return __virtualname__ return (False, 'The splunk execution module failed to load: ' 'requires splunk python library to be installed.') def _get_secret_key(profile): config = __salt__['config.option'](profile) return config.get('password_secret_key') def _generate_password(email): m = hmac.new(base64.b64decode(_get_secret_key('splunk')), six.text_type([email, SERVICE_NAME])) return base64.urlsafe_b64encode(m.digest()).strip().replace('=', '') def _send_email(name, email): "send a email to inform user of account creation" config = __salt__['config.option']('splunk') email_object = config.get('email') if email_object: cc = email_object.get('cc') subject = email_object.get('subject') message = email_object.get('message').format(name, name, _generate_password(email), name) try: mail_process = subprocess.Popen(['mail', '-s', subject, '-c', cc, email], stdin=subprocess.PIPE) except Exception as e: log.error("unable to send email to %s: %s", email, e) mail_process.communicate(message) log.info("sent account creation email to %s", email) def _populate_cache(profile="splunk"): config = __salt__['config.option'](profile) key = "splunk.users.{0}".format( config.get('host') ) if key not in __context__: client = _get_splunk(profile) kwargs = {'sort_key': 'realname', 'sort_dir': 'asc'} users = client.users.list(count=-1, **kwargs) result = {} for user in users: result[user.email.lower()] = user __context__[key] = result return True def _get_splunk(profile): ''' Return the splunk client, cached into __context__ for performance ''' config = __salt__['config.option'](profile) key = "splunk.{0}:{1}:{2}:{3}".format( config.get('host'), config.get('port'), config.get('username'), config.get('password') ) if key not in __context__: __context__[key] = splunklib.client.connect( host=config.get('host'), port=config.get('port'), username=config.get('username'), password=config.get('password')) return __context__[key] def list_users(profile="splunk"): ''' List all users in 
the splunk DB CLI Example: salt myminion splunk.list_users ''' config = __salt__['config.option'](profile) key = "splunk.users.{0}".format( config.get('host') ) if key not in __context__: _populate_cache(profile) return __context__[key] def create_user(email, profile="splunk", **kwargs): ''' create a splunk user by name/email CLI Example: salt myminion splunk.create_user user@example.com roles=['user'] realname="Test User" name=testuser ''' client = _get_splunk(profile) email = email.lower() user = list_users(profile).get(email) if user: log.error("User is already present %s", email) return False property_map = {} for field in ALLOWED_FIELDS_FOR_MODIFICATION: if kwargs.get(field): property_map[field] = kwargs.get(field) try: # create for req_field in REQUIRED_FIELDS_FOR_CREATE: if not property_map.get(req_field): log.error("Missing required params %s", ', '.join([six.text_type(k) for k in REQUIRED_FIELDS_FOR_CREATE])) return False newuser = client.users.create(username=property_map['name'], password=_generate_password(email), roles=property_map['roles'], email=email, realname=property_map['realname']) _send_email(newuser.name, newuser.email) response = {} for field in ['email', 'password', 'realname', 'roles']: response[field] = newuser[field] except Exception as e: log.error("Caught exception %s", e) return False def update_user(email, profile="splunk", **kwargs): ''' Create a splunk user by email CLI Example: salt myminion splunk.update_user example@domain.com roles=['user'] realname="Test User" ''' client = _get_splunk(profile) email = email.lower() user = list_users(profile).get(email) if not user: log.error('Failed to retrieve user %s', email) return False property_map = {} for field in ALLOWED_FIELDS_FOR_MODIFICATION: if kwargs.get(field): property_map[field] = kwargs.get(field) # update kwargs = {} roles = [role.name for role in user.role_entities] for k, v in property_map.items(): resource_value = user[k] if resource_value is not None: # you can't update 
the username in update api call if k.lower() == 'name': continue if k.lower() == 'roles': if isinstance(v, six.string_types): v = v.split(',') if set(roles) != set(v): kwargs['roles'] = list(set(v)) elif resource_value != v: kwargs[k] = v if kwargs: user.update(**kwargs).refresh() fields_modified = {} for field in ALLOWED_FIELDS_FOR_MODIFICATION: fields_modified[field] = user[field] else: #succeeded, no change return True def delete_user(email, profile="splunk"): ''' Delete a splunk user by email CLI Example: salt myminion splunk_user.delete 'user@example.com' ''' client = _get_splunk(profile) user = list_users(profile).get(email) if user: try: client.users.delete(user.name) except (AuthenticationError, HTTPError) as e: log.info('Exception: %s', e) return False else: return False return user.name not in client.users
saltstack/salt
salt/modules/splunk.py
create_user
python
def create_user(email, profile="splunk", **kwargs): ''' create a splunk user by name/email CLI Example: salt myminion splunk.create_user user@example.com roles=['user'] realname="Test User" name=testuser ''' client = _get_splunk(profile) email = email.lower() user = list_users(profile).get(email) if user: log.error("User is already present %s", email) return False property_map = {} for field in ALLOWED_FIELDS_FOR_MODIFICATION: if kwargs.get(field): property_map[field] = kwargs.get(field) try: # create for req_field in REQUIRED_FIELDS_FOR_CREATE: if not property_map.get(req_field): log.error("Missing required params %s", ', '.join([six.text_type(k) for k in REQUIRED_FIELDS_FOR_CREATE])) return False newuser = client.users.create(username=property_map['name'], password=_generate_password(email), roles=property_map['roles'], email=email, realname=property_map['realname']) _send_email(newuser.name, newuser.email) response = {} for field in ['email', 'password', 'realname', 'roles']: response[field] = newuser[field] except Exception as e: log.error("Caught exception %s", e) return False
create a splunk user by name/email CLI Example: salt myminion splunk.create_user user@example.com roles=['user'] realname="Test User" name=testuser
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/splunk.py#L199-L246
[ "def list_users(profile=\"splunk\"):\n '''\n List all users in the splunk DB\n\n CLI Example:\n\n salt myminion splunk.list_users\n '''\n\n config = __salt__['config.option'](profile)\n key = \"splunk.users.{0}\".format(\n config.get('host')\n )\n\n if key not in __context__:\n _populate_cache(profile)\n\n return __context__[key]\n", "def _generate_password(email):\n m = hmac.new(base64.b64decode(_get_secret_key('splunk')), six.text_type([email, SERVICE_NAME]))\n return base64.urlsafe_b64encode(m.digest()).strip().replace('=', '')\n", "def _send_email(name, email):\n \"send a email to inform user of account creation\"\n config = __salt__['config.option']('splunk')\n email_object = config.get('email')\n if email_object:\n cc = email_object.get('cc')\n subject = email_object.get('subject')\n message = email_object.get('message').format(name, name, _generate_password(email), name)\n\n try:\n mail_process = subprocess.Popen(['mail', '-s', subject, '-c', cc, email], stdin=subprocess.PIPE)\n except Exception as e:\n log.error(\"unable to send email to %s: %s\", email, e)\n\n mail_process.communicate(message)\n\n log.info(\"sent account creation email to %s\", email)\n", "def _get_splunk(profile):\n '''\n Return the splunk client, cached into __context__ for performance\n '''\n config = __salt__['config.option'](profile)\n\n key = \"splunk.{0}:{1}:{2}:{3}\".format(\n config.get('host'),\n config.get('port'),\n config.get('username'),\n config.get('password')\n )\n\n if key not in __context__:\n __context__[key] = splunklib.client.connect(\n host=config.get('host'),\n port=config.get('port'),\n username=config.get('username'),\n password=config.get('password'))\n\n return __context__[key]\n" ]
# -*- coding: utf-8 -*- ''' Module for interop with the Splunk API .. versionadded:: 2016.3.0. :depends: - splunk-sdk python module :configuration: Configure this module by specifying the name of a configuration profile in the minion config, minion pillar, or master config. The module will use the 'splunk' key by default, if defined. For example: .. code-block:: yaml splunk: username: alice password: abc123 host: example.splunkcloud.com port: 8080 ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import logging import hmac import base64 import subprocess # Import 3rd-party libs from salt.ext import six HAS_LIBS = False try: import splunklib.client from splunklib.client import AuthenticationError from splunklib.binding import HTTPError HAS_LIBS = True except ImportError: pass log = logging.getLogger(__name__) __virtualname__ = 'splunk' SERVICE_NAME = "splunk" ALLOWED_FIELDS_FOR_MODIFICATION = [ 'realname', 'roles', 'defaultApp', 'tz', #'capabilities', 'name' ] REQUIRED_FIELDS_FOR_CREATE = [ 'realname', 'name', 'roles' ] def __virtual__(): ''' Only load this module if splunk is installed on this minion. 
''' if HAS_LIBS: return __virtualname__ return (False, 'The splunk execution module failed to load: ' 'requires splunk python library to be installed.') def _get_secret_key(profile): config = __salt__['config.option'](profile) return config.get('password_secret_key') def _generate_password(email): m = hmac.new(base64.b64decode(_get_secret_key('splunk')), six.text_type([email, SERVICE_NAME])) return base64.urlsafe_b64encode(m.digest()).strip().replace('=', '') def _send_email(name, email): "send a email to inform user of account creation" config = __salt__['config.option']('splunk') email_object = config.get('email') if email_object: cc = email_object.get('cc') subject = email_object.get('subject') message = email_object.get('message').format(name, name, _generate_password(email), name) try: mail_process = subprocess.Popen(['mail', '-s', subject, '-c', cc, email], stdin=subprocess.PIPE) except Exception as e: log.error("unable to send email to %s: %s", email, e) mail_process.communicate(message) log.info("sent account creation email to %s", email) def _populate_cache(profile="splunk"): config = __salt__['config.option'](profile) key = "splunk.users.{0}".format( config.get('host') ) if key not in __context__: client = _get_splunk(profile) kwargs = {'sort_key': 'realname', 'sort_dir': 'asc'} users = client.users.list(count=-1, **kwargs) result = {} for user in users: result[user.email.lower()] = user __context__[key] = result return True def _get_splunk(profile): ''' Return the splunk client, cached into __context__ for performance ''' config = __salt__['config.option'](profile) key = "splunk.{0}:{1}:{2}:{3}".format( config.get('host'), config.get('port'), config.get('username'), config.get('password') ) if key not in __context__: __context__[key] = splunklib.client.connect( host=config.get('host'), port=config.get('port'), username=config.get('username'), password=config.get('password')) return __context__[key] def list_users(profile="splunk"): ''' List all users in 
the splunk DB CLI Example: salt myminion splunk.list_users ''' config = __salt__['config.option'](profile) key = "splunk.users.{0}".format( config.get('host') ) if key not in __context__: _populate_cache(profile) return __context__[key] def get_user(email, profile="splunk", **kwargs): ''' Get a splunk user by name/email CLI Example: salt myminion splunk.get_user 'user@example.com' user_details=false salt myminion splunk.get_user 'user@example.com' user_details=true ''' user_map = list_users(profile) user_found = email.lower() in user_map.keys() if not kwargs.get('user_details', False) and user_found: # The user is in splunk group, just return return True elif kwargs.get('user_details', False) and user_found: user = user_map[email.lower()] response = {} for field in ['defaultApp', 'realname', 'name', 'email']: response[field] = user[field] response['roles'] = [] for role in user.role_entities: response['roles'].append(role.name) return response return False def update_user(email, profile="splunk", **kwargs): ''' Create a splunk user by email CLI Example: salt myminion splunk.update_user example@domain.com roles=['user'] realname="Test User" ''' client = _get_splunk(profile) email = email.lower() user = list_users(profile).get(email) if not user: log.error('Failed to retrieve user %s', email) return False property_map = {} for field in ALLOWED_FIELDS_FOR_MODIFICATION: if kwargs.get(field): property_map[field] = kwargs.get(field) # update kwargs = {} roles = [role.name for role in user.role_entities] for k, v in property_map.items(): resource_value = user[k] if resource_value is not None: # you can't update the username in update api call if k.lower() == 'name': continue if k.lower() == 'roles': if isinstance(v, six.string_types): v = v.split(',') if set(roles) != set(v): kwargs['roles'] = list(set(v)) elif resource_value != v: kwargs[k] = v if kwargs: user.update(**kwargs).refresh() fields_modified = {} for field in ALLOWED_FIELDS_FOR_MODIFICATION: 
fields_modified[field] = user[field] else: #succeeded, no change return True def delete_user(email, profile="splunk"): ''' Delete a splunk user by email CLI Example: salt myminion splunk_user.delete 'user@example.com' ''' client = _get_splunk(profile) user = list_users(profile).get(email) if user: try: client.users.delete(user.name) except (AuthenticationError, HTTPError) as e: log.info('Exception: %s', e) return False else: return False return user.name not in client.users
saltstack/salt
salt/modules/splunk.py
update_user
python
def update_user(email, profile="splunk", **kwargs): ''' Create a splunk user by email CLI Example: salt myminion splunk.update_user example@domain.com roles=['user'] realname="Test User" ''' client = _get_splunk(profile) email = email.lower() user = list_users(profile).get(email) if not user: log.error('Failed to retrieve user %s', email) return False property_map = {} for field in ALLOWED_FIELDS_FOR_MODIFICATION: if kwargs.get(field): property_map[field] = kwargs.get(field) # update kwargs = {} roles = [role.name for role in user.role_entities] for k, v in property_map.items(): resource_value = user[k] if resource_value is not None: # you can't update the username in update api call if k.lower() == 'name': continue if k.lower() == 'roles': if isinstance(v, six.string_types): v = v.split(',') if set(roles) != set(v): kwargs['roles'] = list(set(v)) elif resource_value != v: kwargs[k] = v if kwargs: user.update(**kwargs).refresh() fields_modified = {} for field in ALLOWED_FIELDS_FOR_MODIFICATION: fields_modified[field] = user[field] else: #succeeded, no change return True
Create a splunk user by email CLI Example: salt myminion splunk.update_user example@domain.com roles=['user'] realname="Test User"
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/splunk.py#L249-L301
[ "def list_users(profile=\"splunk\"):\n '''\n List all users in the splunk DB\n\n CLI Example:\n\n salt myminion splunk.list_users\n '''\n\n config = __salt__['config.option'](profile)\n key = \"splunk.users.{0}\".format(\n config.get('host')\n )\n\n if key not in __context__:\n _populate_cache(profile)\n\n return __context__[key]\n", "def _get_splunk(profile):\n '''\n Return the splunk client, cached into __context__ for performance\n '''\n config = __salt__['config.option'](profile)\n\n key = \"splunk.{0}:{1}:{2}:{3}\".format(\n config.get('host'),\n config.get('port'),\n config.get('username'),\n config.get('password')\n )\n\n if key not in __context__:\n __context__[key] = splunklib.client.connect(\n host=config.get('host'),\n port=config.get('port'),\n username=config.get('username'),\n password=config.get('password'))\n\n return __context__[key]\n" ]
# -*- coding: utf-8 -*- ''' Module for interop with the Splunk API .. versionadded:: 2016.3.0. :depends: - splunk-sdk python module :configuration: Configure this module by specifying the name of a configuration profile in the minion config, minion pillar, or master config. The module will use the 'splunk' key by default, if defined. For example: .. code-block:: yaml splunk: username: alice password: abc123 host: example.splunkcloud.com port: 8080 ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import logging import hmac import base64 import subprocess # Import 3rd-party libs from salt.ext import six HAS_LIBS = False try: import splunklib.client from splunklib.client import AuthenticationError from splunklib.binding import HTTPError HAS_LIBS = True except ImportError: pass log = logging.getLogger(__name__) __virtualname__ = 'splunk' SERVICE_NAME = "splunk" ALLOWED_FIELDS_FOR_MODIFICATION = [ 'realname', 'roles', 'defaultApp', 'tz', #'capabilities', 'name' ] REQUIRED_FIELDS_FOR_CREATE = [ 'realname', 'name', 'roles' ] def __virtual__(): ''' Only load this module if splunk is installed on this minion. 
''' if HAS_LIBS: return __virtualname__ return (False, 'The splunk execution module failed to load: ' 'requires splunk python library to be installed.') def _get_secret_key(profile): config = __salt__['config.option'](profile) return config.get('password_secret_key') def _generate_password(email): m = hmac.new(base64.b64decode(_get_secret_key('splunk')), six.text_type([email, SERVICE_NAME])) return base64.urlsafe_b64encode(m.digest()).strip().replace('=', '') def _send_email(name, email): "send a email to inform user of account creation" config = __salt__['config.option']('splunk') email_object = config.get('email') if email_object: cc = email_object.get('cc') subject = email_object.get('subject') message = email_object.get('message').format(name, name, _generate_password(email), name) try: mail_process = subprocess.Popen(['mail', '-s', subject, '-c', cc, email], stdin=subprocess.PIPE) except Exception as e: log.error("unable to send email to %s: %s", email, e) mail_process.communicate(message) log.info("sent account creation email to %s", email) def _populate_cache(profile="splunk"): config = __salt__['config.option'](profile) key = "splunk.users.{0}".format( config.get('host') ) if key not in __context__: client = _get_splunk(profile) kwargs = {'sort_key': 'realname', 'sort_dir': 'asc'} users = client.users.list(count=-1, **kwargs) result = {} for user in users: result[user.email.lower()] = user __context__[key] = result return True def _get_splunk(profile): ''' Return the splunk client, cached into __context__ for performance ''' config = __salt__['config.option'](profile) key = "splunk.{0}:{1}:{2}:{3}".format( config.get('host'), config.get('port'), config.get('username'), config.get('password') ) if key not in __context__: __context__[key] = splunklib.client.connect( host=config.get('host'), port=config.get('port'), username=config.get('username'), password=config.get('password')) return __context__[key] def list_users(profile="splunk"): ''' List all users in 
the splunk DB CLI Example: salt myminion splunk.list_users ''' config = __salt__['config.option'](profile) key = "splunk.users.{0}".format( config.get('host') ) if key not in __context__: _populate_cache(profile) return __context__[key] def get_user(email, profile="splunk", **kwargs): ''' Get a splunk user by name/email CLI Example: salt myminion splunk.get_user 'user@example.com' user_details=false salt myminion splunk.get_user 'user@example.com' user_details=true ''' user_map = list_users(profile) user_found = email.lower() in user_map.keys() if not kwargs.get('user_details', False) and user_found: # The user is in splunk group, just return return True elif kwargs.get('user_details', False) and user_found: user = user_map[email.lower()] response = {} for field in ['defaultApp', 'realname', 'name', 'email']: response[field] = user[field] response['roles'] = [] for role in user.role_entities: response['roles'].append(role.name) return response return False def create_user(email, profile="splunk", **kwargs): ''' create a splunk user by name/email CLI Example: salt myminion splunk.create_user user@example.com roles=['user'] realname="Test User" name=testuser ''' client = _get_splunk(profile) email = email.lower() user = list_users(profile).get(email) if user: log.error("User is already present %s", email) return False property_map = {} for field in ALLOWED_FIELDS_FOR_MODIFICATION: if kwargs.get(field): property_map[field] = kwargs.get(field) try: # create for req_field in REQUIRED_FIELDS_FOR_CREATE: if not property_map.get(req_field): log.error("Missing required params %s", ', '.join([six.text_type(k) for k in REQUIRED_FIELDS_FOR_CREATE])) return False newuser = client.users.create(username=property_map['name'], password=_generate_password(email), roles=property_map['roles'], email=email, realname=property_map['realname']) _send_email(newuser.name, newuser.email) response = {} for field in ['email', 'password', 'realname', 'roles']: response[field] = newuser[field] 
except Exception as e: log.error("Caught exception %s", e) return False def delete_user(email, profile="splunk"): ''' Delete a splunk user by email CLI Example: salt myminion splunk_user.delete 'user@example.com' ''' client = _get_splunk(profile) user = list_users(profile).get(email) if user: try: client.users.delete(user.name) except (AuthenticationError, HTTPError) as e: log.info('Exception: %s', e) return False else: return False return user.name not in client.users
saltstack/salt
salt/modules/splunk.py
delete_user
python
def delete_user(email, profile="splunk"): ''' Delete a splunk user by email CLI Example: salt myminion splunk_user.delete 'user@example.com' ''' client = _get_splunk(profile) user = list_users(profile).get(email) if user: try: client.users.delete(user.name) except (AuthenticationError, HTTPError) as e: log.info('Exception: %s', e) return False else: return False return user.name not in client.users
Delete a splunk user by email CLI Example: salt myminion splunk_user.delete 'user@example.com'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/splunk.py#L304-L326
[ "def list_users(profile=\"splunk\"):\n '''\n List all users in the splunk DB\n\n CLI Example:\n\n salt myminion splunk.list_users\n '''\n\n config = __salt__['config.option'](profile)\n key = \"splunk.users.{0}\".format(\n config.get('host')\n )\n\n if key not in __context__:\n _populate_cache(profile)\n\n return __context__[key]\n", "def _get_splunk(profile):\n '''\n Return the splunk client, cached into __context__ for performance\n '''\n config = __salt__['config.option'](profile)\n\n key = \"splunk.{0}:{1}:{2}:{3}\".format(\n config.get('host'),\n config.get('port'),\n config.get('username'),\n config.get('password')\n )\n\n if key not in __context__:\n __context__[key] = splunklib.client.connect(\n host=config.get('host'),\n port=config.get('port'),\n username=config.get('username'),\n password=config.get('password'))\n\n return __context__[key]\n" ]
# -*- coding: utf-8 -*- ''' Module for interop with the Splunk API .. versionadded:: 2016.3.0. :depends: - splunk-sdk python module :configuration: Configure this module by specifying the name of a configuration profile in the minion config, minion pillar, or master config. The module will use the 'splunk' key by default, if defined. For example: .. code-block:: yaml splunk: username: alice password: abc123 host: example.splunkcloud.com port: 8080 ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import logging import hmac import base64 import subprocess # Import 3rd-party libs from salt.ext import six HAS_LIBS = False try: import splunklib.client from splunklib.client import AuthenticationError from splunklib.binding import HTTPError HAS_LIBS = True except ImportError: pass log = logging.getLogger(__name__) __virtualname__ = 'splunk' SERVICE_NAME = "splunk" ALLOWED_FIELDS_FOR_MODIFICATION = [ 'realname', 'roles', 'defaultApp', 'tz', #'capabilities', 'name' ] REQUIRED_FIELDS_FOR_CREATE = [ 'realname', 'name', 'roles' ] def __virtual__(): ''' Only load this module if splunk is installed on this minion. 
''' if HAS_LIBS: return __virtualname__ return (False, 'The splunk execution module failed to load: ' 'requires splunk python library to be installed.') def _get_secret_key(profile): config = __salt__['config.option'](profile) return config.get('password_secret_key') def _generate_password(email): m = hmac.new(base64.b64decode(_get_secret_key('splunk')), six.text_type([email, SERVICE_NAME])) return base64.urlsafe_b64encode(m.digest()).strip().replace('=', '') def _send_email(name, email): "send a email to inform user of account creation" config = __salt__['config.option']('splunk') email_object = config.get('email') if email_object: cc = email_object.get('cc') subject = email_object.get('subject') message = email_object.get('message').format(name, name, _generate_password(email), name) try: mail_process = subprocess.Popen(['mail', '-s', subject, '-c', cc, email], stdin=subprocess.PIPE) except Exception as e: log.error("unable to send email to %s: %s", email, e) mail_process.communicate(message) log.info("sent account creation email to %s", email) def _populate_cache(profile="splunk"): config = __salt__['config.option'](profile) key = "splunk.users.{0}".format( config.get('host') ) if key not in __context__: client = _get_splunk(profile) kwargs = {'sort_key': 'realname', 'sort_dir': 'asc'} users = client.users.list(count=-1, **kwargs) result = {} for user in users: result[user.email.lower()] = user __context__[key] = result return True def _get_splunk(profile): ''' Return the splunk client, cached into __context__ for performance ''' config = __salt__['config.option'](profile) key = "splunk.{0}:{1}:{2}:{3}".format( config.get('host'), config.get('port'), config.get('username'), config.get('password') ) if key not in __context__: __context__[key] = splunklib.client.connect( host=config.get('host'), port=config.get('port'), username=config.get('username'), password=config.get('password')) return __context__[key] def list_users(profile="splunk"): ''' List all users in 
the splunk DB CLI Example: salt myminion splunk.list_users ''' config = __salt__['config.option'](profile) key = "splunk.users.{0}".format( config.get('host') ) if key not in __context__: _populate_cache(profile) return __context__[key] def get_user(email, profile="splunk", **kwargs): ''' Get a splunk user by name/email CLI Example: salt myminion splunk.get_user 'user@example.com' user_details=false salt myminion splunk.get_user 'user@example.com' user_details=true ''' user_map = list_users(profile) user_found = email.lower() in user_map.keys() if not kwargs.get('user_details', False) and user_found: # The user is in splunk group, just return return True elif kwargs.get('user_details', False) and user_found: user = user_map[email.lower()] response = {} for field in ['defaultApp', 'realname', 'name', 'email']: response[field] = user[field] response['roles'] = [] for role in user.role_entities: response['roles'].append(role.name) return response return False def create_user(email, profile="splunk", **kwargs): ''' create a splunk user by name/email CLI Example: salt myminion splunk.create_user user@example.com roles=['user'] realname="Test User" name=testuser ''' client = _get_splunk(profile) email = email.lower() user = list_users(profile).get(email) if user: log.error("User is already present %s", email) return False property_map = {} for field in ALLOWED_FIELDS_FOR_MODIFICATION: if kwargs.get(field): property_map[field] = kwargs.get(field) try: # create for req_field in REQUIRED_FIELDS_FOR_CREATE: if not property_map.get(req_field): log.error("Missing required params %s", ', '.join([six.text_type(k) for k in REQUIRED_FIELDS_FOR_CREATE])) return False newuser = client.users.create(username=property_map['name'], password=_generate_password(email), roles=property_map['roles'], email=email, realname=property_map['realname']) _send_email(newuser.name, newuser.email) response = {} for field in ['email', 'password', 'realname', 'roles']: response[field] = newuser[field] 
except Exception as e: log.error("Caught exception %s", e) return False def update_user(email, profile="splunk", **kwargs): ''' Create a splunk user by email CLI Example: salt myminion splunk.update_user example@domain.com roles=['user'] realname="Test User" ''' client = _get_splunk(profile) email = email.lower() user = list_users(profile).get(email) if not user: log.error('Failed to retrieve user %s', email) return False property_map = {} for field in ALLOWED_FIELDS_FOR_MODIFICATION: if kwargs.get(field): property_map[field] = kwargs.get(field) # update kwargs = {} roles = [role.name for role in user.role_entities] for k, v in property_map.items(): resource_value = user[k] if resource_value is not None: # you can't update the username in update api call if k.lower() == 'name': continue if k.lower() == 'roles': if isinstance(v, six.string_types): v = v.split(',') if set(roles) != set(v): kwargs['roles'] = list(set(v)) elif resource_value != v: kwargs[k] = v if kwargs: user.update(**kwargs).refresh() fields_modified = {} for field in ALLOWED_FIELDS_FOR_MODIFICATION: fields_modified[field] = user[field] else: #succeeded, no change return True
saltstack/salt
salt/grains/extra.py
shell
python
def shell(): ''' Return the default shell to use on this system ''' # Provides: # shell if salt.utils.platform.is_windows(): env_var = 'COMSPEC' default = r'C:\Windows\system32\cmd.exe' else: env_var = 'SHELL' default = '/bin/sh' return {'shell': os.environ.get(env_var, default)}
Return the default shell to use on this system
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/grains/extra.py#L21-L34
null
# -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, unicode_literals # Import python libs import os # Import third party libs import logging # Import salt libs import salt.utils.data import salt.utils.files import salt.utils.platform import salt.utils.yaml __proxyenabled__ = ['*'] log = logging.getLogger(__name__) def config(): ''' Return the grains set in the grains file ''' if 'conf_file' not in __opts__: return {} if os.path.isdir(__opts__['conf_file']): if salt.utils.platform.is_proxy(): gfn = os.path.join( __opts__['conf_file'], 'proxy.d', __opts__['id'], 'grains' ) else: gfn = os.path.join( __opts__['conf_file'], 'grains' ) else: if salt.utils.platform.is_proxy(): gfn = os.path.join( os.path.dirname(__opts__['conf_file']), 'proxy.d', __opts__['id'], 'grains' ) else: gfn = os.path.join( os.path.dirname(__opts__['conf_file']), 'grains' ) if os.path.isfile(gfn): log.debug('Loading static grains from %s', gfn) with salt.utils.files.fopen(gfn, 'rb') as fp_: try: return salt.utils.data.decode(salt.utils.yaml.safe_load(fp_)) except Exception: log.warning("Bad syntax in grains file! Skipping.") return {} return {}
saltstack/salt
salt/grains/extra.py
config
python
def config(): ''' Return the grains set in the grains file ''' if 'conf_file' not in __opts__: return {} if os.path.isdir(__opts__['conf_file']): if salt.utils.platform.is_proxy(): gfn = os.path.join( __opts__['conf_file'], 'proxy.d', __opts__['id'], 'grains' ) else: gfn = os.path.join( __opts__['conf_file'], 'grains' ) else: if salt.utils.platform.is_proxy(): gfn = os.path.join( os.path.dirname(__opts__['conf_file']), 'proxy.d', __opts__['id'], 'grains' ) else: gfn = os.path.join( os.path.dirname(__opts__['conf_file']), 'grains' ) if os.path.isfile(gfn): log.debug('Loading static grains from %s', gfn) with salt.utils.files.fopen(gfn, 'rb') as fp_: try: return salt.utils.data.decode(salt.utils.yaml.safe_load(fp_)) except Exception: log.warning("Bad syntax in grains file! Skipping.") return {} return {}
Return the grains set in the grains file
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/grains/extra.py#L37-L77
null
# -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, unicode_literals # Import python libs import os # Import third party libs import logging # Import salt libs import salt.utils.data import salt.utils.files import salt.utils.platform import salt.utils.yaml __proxyenabled__ = ['*'] log = logging.getLogger(__name__) def shell(): ''' Return the default shell to use on this system ''' # Provides: # shell if salt.utils.platform.is_windows(): env_var = 'COMSPEC' default = r'C:\Windows\system32\cmd.exe' else: env_var = 'SHELL' default = '/bin/sh' return {'shell': os.environ.get(env_var, default)}
saltstack/salt
salt/modules/mac_desktop.py
get_output_volume
python
def get_output_volume(): ''' Get the output volume (range 0 to 100) CLI Example: .. code-block:: bash salt '*' desktop.get_output_volume ''' cmd = 'osascript -e "get output volume of (get volume settings)"' call = __salt__['cmd.run_all']( cmd, output_loglevel='debug', python_shell=False ) _check_cmd(call) return call.get('stdout')
Get the output volume (range 0 to 100) CLI Example: .. code-block:: bash salt '*' desktop.get_output_volume
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_desktop.py#L24-L42
[ "def _check_cmd(call):\n '''\n Check the output of the cmd.run_all function call.\n '''\n if call['retcode'] != 0:\n comment = ''\n std_err = call.get('stderr')\n std_out = call.get('stdout')\n if std_err:\n comment += std_err\n if std_out:\n comment += std_out\n\n raise CommandExecutionError('Error running command: {0}'.format(comment))\n\n return call\n" ]
# -*- coding: utf-8 -*- ''' macOS implementations of various commands in the "desktop" interface ''' from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.platform from salt.exceptions import CommandExecutionError # Define the module's virtual name __virtualname__ = 'desktop' def __virtual__(): ''' Only load on Mac systems ''' if salt.utils.platform.is_darwin(): return __virtualname__ return False, 'Cannot load macOS desktop module: This is not a macOS host.' def set_output_volume(volume): ''' Set the volume of sound. volume The level of volume. Can range from 0 to 100. CLI Example: .. code-block:: bash salt '*' desktop.set_output_volume <volume> ''' cmd = 'osascript -e "set volume output volume {0}"'.format(volume) call = __salt__['cmd.run_all']( cmd, output_loglevel='debug', python_shell=False ) _check_cmd(call) return get_output_volume() def screensaver(): ''' Launch the screensaver. CLI Example: .. code-block:: bash salt '*' desktop.screensaver ''' cmd = 'open /System/Library/Frameworks/ScreenSaver.framework/Versions/A/Resources/ScreenSaverEngine.app' call = __salt__['cmd.run_all']( cmd, output_loglevel='debug', python_shell=False ) _check_cmd(call) return True def lock(): ''' Lock the desktop session CLI Example: .. code-block:: bash salt '*' desktop.lock ''' cmd = '/System/Library/CoreServices/Menu\\ Extras/User.menu/Contents/Resources/CGSession -suspend' call = __salt__['cmd.run_all']( cmd, output_loglevel='debug', python_shell=False ) _check_cmd(call) return True def say(*words): ''' Say some words. words The words to execute the say command with. CLI Example: .. code-block:: bash salt '*' desktop.say <word0> <word1> ... <wordN> ''' cmd = 'say {0}'.format(' '.join(words)) call = __salt__['cmd.run_all']( cmd, output_loglevel='debug', python_shell=False ) _check_cmd(call) return True def _check_cmd(call): ''' Check the output of the cmd.run_all function call. 
''' if call['retcode'] != 0: comment = '' std_err = call.get('stderr') std_out = call.get('stdout') if std_err: comment += std_err if std_out: comment += std_out raise CommandExecutionError('Error running command: {0}'.format(comment)) return call
saltstack/salt
salt/modules/mac_desktop.py
set_output_volume
python
def set_output_volume(volume): ''' Set the volume of sound. volume The level of volume. Can range from 0 to 100. CLI Example: .. code-block:: bash salt '*' desktop.set_output_volume <volume> ''' cmd = 'osascript -e "set volume output volume {0}"'.format(volume) call = __salt__['cmd.run_all']( cmd, output_loglevel='debug', python_shell=False ) _check_cmd(call) return get_output_volume()
Set the volume of sound. volume The level of volume. Can range from 0 to 100. CLI Example: .. code-block:: bash salt '*' desktop.set_output_volume <volume>
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_desktop.py#L45-L66
[ "def _check_cmd(call):\n '''\n Check the output of the cmd.run_all function call.\n '''\n if call['retcode'] != 0:\n comment = ''\n std_err = call.get('stderr')\n std_out = call.get('stdout')\n if std_err:\n comment += std_err\n if std_out:\n comment += std_out\n\n raise CommandExecutionError('Error running command: {0}'.format(comment))\n\n return call\n", "def get_output_volume():\n '''\n Get the output volume (range 0 to 100)\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' desktop.get_output_volume\n '''\n cmd = 'osascript -e \"get output volume of (get volume settings)\"'\n call = __salt__['cmd.run_all'](\n cmd,\n output_loglevel='debug',\n python_shell=False\n )\n _check_cmd(call)\n\n return call.get('stdout')\n" ]
# -*- coding: utf-8 -*- ''' macOS implementations of various commands in the "desktop" interface ''' from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.platform from salt.exceptions import CommandExecutionError # Define the module's virtual name __virtualname__ = 'desktop' def __virtual__(): ''' Only load on Mac systems ''' if salt.utils.platform.is_darwin(): return __virtualname__ return False, 'Cannot load macOS desktop module: This is not a macOS host.' def get_output_volume(): ''' Get the output volume (range 0 to 100) CLI Example: .. code-block:: bash salt '*' desktop.get_output_volume ''' cmd = 'osascript -e "get output volume of (get volume settings)"' call = __salt__['cmd.run_all']( cmd, output_loglevel='debug', python_shell=False ) _check_cmd(call) return call.get('stdout') def screensaver(): ''' Launch the screensaver. CLI Example: .. code-block:: bash salt '*' desktop.screensaver ''' cmd = 'open /System/Library/Frameworks/ScreenSaver.framework/Versions/A/Resources/ScreenSaverEngine.app' call = __salt__['cmd.run_all']( cmd, output_loglevel='debug', python_shell=False ) _check_cmd(call) return True def lock(): ''' Lock the desktop session CLI Example: .. code-block:: bash salt '*' desktop.lock ''' cmd = '/System/Library/CoreServices/Menu\\ Extras/User.menu/Contents/Resources/CGSession -suspend' call = __salt__['cmd.run_all']( cmd, output_loglevel='debug', python_shell=False ) _check_cmd(call) return True def say(*words): ''' Say some words. words The words to execute the say command with. CLI Example: .. code-block:: bash salt '*' desktop.say <word0> <word1> ... <wordN> ''' cmd = 'say {0}'.format(' '.join(words)) call = __salt__['cmd.run_all']( cmd, output_loglevel='debug', python_shell=False ) _check_cmd(call) return True def _check_cmd(call): ''' Check the output of the cmd.run_all function call. 
''' if call['retcode'] != 0: comment = '' std_err = call.get('stderr') std_out = call.get('stdout') if std_err: comment += std_err if std_out: comment += std_out raise CommandExecutionError('Error running command: {0}'.format(comment)) return call
saltstack/salt
salt/modules/mac_desktop.py
screensaver
python
def screensaver(): ''' Launch the screensaver. CLI Example: .. code-block:: bash salt '*' desktop.screensaver ''' cmd = 'open /System/Library/Frameworks/ScreenSaver.framework/Versions/A/Resources/ScreenSaverEngine.app' call = __salt__['cmd.run_all']( cmd, output_loglevel='debug', python_shell=False ) _check_cmd(call) return True
Launch the screensaver. CLI Example: .. code-block:: bash salt '*' desktop.screensaver
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_desktop.py#L69-L87
[ "def _check_cmd(call):\n '''\n Check the output of the cmd.run_all function call.\n '''\n if call['retcode'] != 0:\n comment = ''\n std_err = call.get('stderr')\n std_out = call.get('stdout')\n if std_err:\n comment += std_err\n if std_out:\n comment += std_out\n\n raise CommandExecutionError('Error running command: {0}'.format(comment))\n\n return call\n" ]
# -*- coding: utf-8 -*- ''' macOS implementations of various commands in the "desktop" interface ''' from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.platform from salt.exceptions import CommandExecutionError # Define the module's virtual name __virtualname__ = 'desktop' def __virtual__(): ''' Only load on Mac systems ''' if salt.utils.platform.is_darwin(): return __virtualname__ return False, 'Cannot load macOS desktop module: This is not a macOS host.' def get_output_volume(): ''' Get the output volume (range 0 to 100) CLI Example: .. code-block:: bash salt '*' desktop.get_output_volume ''' cmd = 'osascript -e "get output volume of (get volume settings)"' call = __salt__['cmd.run_all']( cmd, output_loglevel='debug', python_shell=False ) _check_cmd(call) return call.get('stdout') def set_output_volume(volume): ''' Set the volume of sound. volume The level of volume. Can range from 0 to 100. CLI Example: .. code-block:: bash salt '*' desktop.set_output_volume <volume> ''' cmd = 'osascript -e "set volume output volume {0}"'.format(volume) call = __salt__['cmd.run_all']( cmd, output_loglevel='debug', python_shell=False ) _check_cmd(call) return get_output_volume() def lock(): ''' Lock the desktop session CLI Example: .. code-block:: bash salt '*' desktop.lock ''' cmd = '/System/Library/CoreServices/Menu\\ Extras/User.menu/Contents/Resources/CGSession -suspend' call = __salt__['cmd.run_all']( cmd, output_loglevel='debug', python_shell=False ) _check_cmd(call) return True def say(*words): ''' Say some words. words The words to execute the say command with. CLI Example: .. code-block:: bash salt '*' desktop.say <word0> <word1> ... <wordN> ''' cmd = 'say {0}'.format(' '.join(words)) call = __salt__['cmd.run_all']( cmd, output_loglevel='debug', python_shell=False ) _check_cmd(call) return True def _check_cmd(call): ''' Check the output of the cmd.run_all function call. 
''' if call['retcode'] != 0: comment = '' std_err = call.get('stderr') std_out = call.get('stdout') if std_err: comment += std_err if std_out: comment += std_out raise CommandExecutionError('Error running command: {0}'.format(comment)) return call
saltstack/salt
salt/modules/mac_desktop.py
say
python
def say(*words): ''' Say some words. words The words to execute the say command with. CLI Example: .. code-block:: bash salt '*' desktop.say <word0> <word1> ... <wordN> ''' cmd = 'say {0}'.format(' '.join(words)) call = __salt__['cmd.run_all']( cmd, output_loglevel='debug', python_shell=False ) _check_cmd(call) return True
Say some words. words The words to execute the say command with. CLI Example: .. code-block:: bash salt '*' desktop.say <word0> <word1> ... <wordN>
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_desktop.py#L111-L132
[ "def _check_cmd(call):\n '''\n Check the output of the cmd.run_all function call.\n '''\n if call['retcode'] != 0:\n comment = ''\n std_err = call.get('stderr')\n std_out = call.get('stdout')\n if std_err:\n comment += std_err\n if std_out:\n comment += std_out\n\n raise CommandExecutionError('Error running command: {0}'.format(comment))\n\n return call\n" ]
# -*- coding: utf-8 -*- ''' macOS implementations of various commands in the "desktop" interface ''' from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.platform from salt.exceptions import CommandExecutionError # Define the module's virtual name __virtualname__ = 'desktop' def __virtual__(): ''' Only load on Mac systems ''' if salt.utils.platform.is_darwin(): return __virtualname__ return False, 'Cannot load macOS desktop module: This is not a macOS host.' def get_output_volume(): ''' Get the output volume (range 0 to 100) CLI Example: .. code-block:: bash salt '*' desktop.get_output_volume ''' cmd = 'osascript -e "get output volume of (get volume settings)"' call = __salt__['cmd.run_all']( cmd, output_loglevel='debug', python_shell=False ) _check_cmd(call) return call.get('stdout') def set_output_volume(volume): ''' Set the volume of sound. volume The level of volume. Can range from 0 to 100. CLI Example: .. code-block:: bash salt '*' desktop.set_output_volume <volume> ''' cmd = 'osascript -e "set volume output volume {0}"'.format(volume) call = __salt__['cmd.run_all']( cmd, output_loglevel='debug', python_shell=False ) _check_cmd(call) return get_output_volume() def screensaver(): ''' Launch the screensaver. CLI Example: .. code-block:: bash salt '*' desktop.screensaver ''' cmd = 'open /System/Library/Frameworks/ScreenSaver.framework/Versions/A/Resources/ScreenSaverEngine.app' call = __salt__['cmd.run_all']( cmd, output_loglevel='debug', python_shell=False ) _check_cmd(call) return True def lock(): ''' Lock the desktop session CLI Example: .. code-block:: bash salt '*' desktop.lock ''' cmd = '/System/Library/CoreServices/Menu\\ Extras/User.menu/Contents/Resources/CGSession -suspend' call = __salt__['cmd.run_all']( cmd, output_loglevel='debug', python_shell=False ) _check_cmd(call) return True def _check_cmd(call): ''' Check the output of the cmd.run_all function call. 
''' if call['retcode'] != 0: comment = '' std_err = call.get('stderr') std_out = call.get('stdout') if std_err: comment += std_err if std_out: comment += std_out raise CommandExecutionError('Error running command: {0}'.format(comment)) return call
saltstack/salt
salt/modules/mac_desktop.py
_check_cmd
python
def _check_cmd(call): ''' Check the output of the cmd.run_all function call. ''' if call['retcode'] != 0: comment = '' std_err = call.get('stderr') std_out = call.get('stdout') if std_err: comment += std_err if std_out: comment += std_out raise CommandExecutionError('Error running command: {0}'.format(comment)) return call
Check the output of the cmd.run_all function call.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_desktop.py#L135-L150
null
# -*- coding: utf-8 -*- ''' macOS implementations of various commands in the "desktop" interface ''' from __future__ import absolute_import, unicode_literals, print_function # Import Salt libs import salt.utils.platform from salt.exceptions import CommandExecutionError # Define the module's virtual name __virtualname__ = 'desktop' def __virtual__(): ''' Only load on Mac systems ''' if salt.utils.platform.is_darwin(): return __virtualname__ return False, 'Cannot load macOS desktop module: This is not a macOS host.' def get_output_volume(): ''' Get the output volume (range 0 to 100) CLI Example: .. code-block:: bash salt '*' desktop.get_output_volume ''' cmd = 'osascript -e "get output volume of (get volume settings)"' call = __salt__['cmd.run_all']( cmd, output_loglevel='debug', python_shell=False ) _check_cmd(call) return call.get('stdout') def set_output_volume(volume): ''' Set the volume of sound. volume The level of volume. Can range from 0 to 100. CLI Example: .. code-block:: bash salt '*' desktop.set_output_volume <volume> ''' cmd = 'osascript -e "set volume output volume {0}"'.format(volume) call = __salt__['cmd.run_all']( cmd, output_loglevel='debug', python_shell=False ) _check_cmd(call) return get_output_volume() def screensaver(): ''' Launch the screensaver. CLI Example: .. code-block:: bash salt '*' desktop.screensaver ''' cmd = 'open /System/Library/Frameworks/ScreenSaver.framework/Versions/A/Resources/ScreenSaverEngine.app' call = __salt__['cmd.run_all']( cmd, output_loglevel='debug', python_shell=False ) _check_cmd(call) return True def lock(): ''' Lock the desktop session CLI Example: .. code-block:: bash salt '*' desktop.lock ''' cmd = '/System/Library/CoreServices/Menu\\ Extras/User.menu/Contents/Resources/CGSession -suspend' call = __salt__['cmd.run_all']( cmd, output_loglevel='debug', python_shell=False ) _check_cmd(call) return True def say(*words): ''' Say some words. words The words to execute the say command with. CLI Example: .. 
code-block:: bash salt '*' desktop.say <word0> <word1> ... <wordN> ''' cmd = 'say {0}'.format(' '.join(words)) call = __salt__['cmd.run_all']( cmd, output_loglevel='debug', python_shell=False ) _check_cmd(call) return True
saltstack/salt
salt/utils/win_pdh.py
build_counter_list
python
def build_counter_list(counter_list): r''' Create a list of Counter objects to be used in the pdh query Args: counter_list (list): A list of tuples containing counter information. Each tuple should contain the object, instance, and counter name. For example, to get the ``% Processor Time`` counter for all Processors on the system (``\Processor(*)\% Processor Time``) you would pass a tuple like this: ``` counter_list = [('Processor', '*', '% Processor Time')] ``` If there is no ``instance`` for the counter, pass ``None`` Multiple counters can be passed like so: ``` counter_list = [('Processor', '*', '% Processor Time'), ('System', None, 'Context Switches/sec')] ``` .. note:: Invalid counters are ignored Returns: list: A list of Counter objects ''' counters = [] index = 0 for obj, instance, counter_name in counter_list: try: counter = Counter.build_counter(obj, instance, index, counter_name) index += 1 counters.append(counter) except CommandExecutionError as exc: # Not a valid counter log.debug(exc.strerror) continue return counters
r''' Create a list of Counter objects to be used in the pdh query Args: counter_list (list): A list of tuples containing counter information. Each tuple should contain the object, instance, and counter name. For example, to get the ``% Processor Time`` counter for all Processors on the system (``\Processor(*)\% Processor Time``) you would pass a tuple like this: ``` counter_list = [('Processor', '*', '% Processor Time')] ``` If there is no ``instance`` for the counter, pass ``None`` Multiple counters can be passed like so: ``` counter_list = [('Processor', '*', '% Processor Time'), ('System', None, 'Context Switches/sec')] ``` .. note:: Invalid counters are ignored Returns: list: A list of Counter objects
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/win_pdh.py#L292-L334
[ "def build_counter(obj, instance, instance_index, counter):\n r'''\n Makes a fully resolved counter path. Counter names are formatted like\n this:\n\n ``\\Processor(*)\\% Processor Time``\n\n The above breaks down like this:\n\n obj = 'Processor'\n instance = '*'\n counter = '% Processor Time'\n\n Args:\n\n obj (str):\n The top level object\n\n instance (str):\n The instance of the object\n\n instance_index (int):\n The index of the instance. Can usually be 0\n\n counter (str):\n The name of the counter\n\n Returns:\n Counter: A Counter object with the path if valid\n\n Raises:\n CommandExecutionError: If the path is invalid\n '''\n path = win32pdh.MakeCounterPath(\n (None, obj, instance, None, instance_index, counter), 0)\n if win32pdh.ValidatePath(path) is 0:\n return Counter(path, obj, instance, instance_index, counter)\n raise CommandExecutionError('Invalid counter specified: {0}'.format(path))\n" ]
# -*- coding: utf-8 -*- r''' Salt Util for getting system information with the Performance Data Helper (pdh). Counter information is gathered from current activity or log files. Usage: .. code-block:: python import salt.utils.win_pdh # Get a list of Counter objects salt.utils.win_pdh.list_objects() # Get a list of ``Processor`` instances salt.utils.win_pdh.list_instances('Processor') # Get a list of ``Processor`` counters salt.utils.win_pdh.list_counters('Processor') # Get the value of a single counter # \Processor(*)\% Processor Time salt.utils.win_pdh.get_counter('Processor', '*', '% Processor Time') # Get the values of multiple counters counter_list = [('Processor', '*', '% Processor Time'), ('System', None, 'Context Switches/sec'), ('Memory', None, 'Pages/sec'), ('Server Work Queues', '*', 'Queue Length')] salt.utils.win_pdh.get_counters(counter_list) # Get all counters for the Processor object salt.utils.win_pdh.get_all_counters('Processor') ''' # https://www.cac.cornell.edu/wiki/index.php?title=Performance_Data_Helper_in_Python_with_win32pdh # https://docs.microsoft.com/en-us/windows/desktop/perfctrs/using-the-pdh-functions-to-consume-counter-data # Import python libs from __future__ import absolute_import, unicode_literals import logging import time # Import 3rd party libs try: import pywintypes import win32pdh HAS_WINDOWS_MODULES = True except ImportError: HAS_WINDOWS_MODULES = False # Import salt libs import salt.utils.platform from salt.exceptions import CommandExecutionError log = logging.getLogger(__file__) # Define the virtual name __virtualname__ = 'pdh' def __virtual__(): ''' Only works on Windows systems with the PyWin32 ''' if not salt.utils.platform.is_windows(): return False, 'salt.utils.win_pdh: Requires Windows' if not HAS_WINDOWS_MODULES: return False, 'salt.utils.win_pdh: Missing required modules' return __virtualname__ class Counter(object): ''' Counter object Has enumerations and functions for working with counters ''' # The dwType field 
from GetCounterInfo returns the following, or'ed. # These come from WinPerf.h PERF_SIZE_DWORD = 0x00000000 PERF_SIZE_LARGE = 0x00000100 PERF_SIZE_ZERO = 0x00000200 # for Zero Length fields PERF_SIZE_VARIABLE_LEN = 0x00000300 # length is in the CounterLength field of the Counter Definition structure # select one of the following values to indicate the counter field usage PERF_TYPE_NUMBER = 0x00000000 # a number (not a counter) PERF_TYPE_COUNTER = 0x00000400 # an increasing numeric value PERF_TYPE_TEXT = 0x00000800 # a text field PERF_TYPE_ZERO = 0x00000C00 # displays a zero # If the PERF_TYPE_NUMBER field was selected, then select one of the # following to describe the Number PERF_NUMBER_HEX = 0x00000000 # display as HEX value PERF_NUMBER_DECIMAL = 0x00010000 # display as a decimal integer PERF_NUMBER_DEC_1000 = 0x00020000 # display as a decimal/1000 # If the PERF_TYPE_COUNTER value was selected then select one of the # following to indicate the type of counter PERF_COUNTER_VALUE = 0x00000000 # display counter value PERF_COUNTER_RATE = 0x00010000 # divide ctr / delta time PERF_COUNTER_FRACTION = 0x00020000 # divide ctr / base PERF_COUNTER_BASE = 0x00030000 # base value used in fractions PERF_COUNTER_ELAPSED = 0x00040000 # subtract counter from current time PERF_COUNTER_QUEUE_LEN = 0x00050000 # Use Queue len processing func. PERF_COUNTER_HISTOGRAM = 0x00060000 # Counter begins or ends a histogram # If the PERF_TYPE_TEXT value was selected, then select one of the # following to indicate the type of TEXT data. PERF_TEXT_UNICODE = 0x00000000 # type of text in text field PERF_TEXT_ASCII = 0x00010000 # ASCII using the CodePage field # Timer SubTypes PERF_TIMER_TICK = 0x00000000 # use system perf. 
freq for base PERF_TIMER_100NS = 0x00100000 # use 100 NS timer time base units PERF_OBJECT_TIMER = 0x00200000 # use the object timer freq # Any types that have calculations performed can use one or more of the # following calculation modification flags listed here PERF_DELTA_COUNTER = 0x00400000 # compute difference first PERF_DELTA_BASE = 0x00800000 # compute base diff as well PERF_INVERSE_COUNTER = 0x01000000 # show as 1.00-value (assumes: PERF_MULTI_COUNTER = 0x02000000 # sum of multiple instances # Select one of the following values to indicate the display suffix (if any) PERF_DISPLAY_NO_SUFFIX = 0x00000000 # no suffix PERF_DISPLAY_PER_SEC = 0x10000000 # "/sec" PERF_DISPLAY_PERCENT = 0x20000000 # "%" PERF_DISPLAY_SECONDS = 0x30000000 # "secs" PERF_DISPLAY_NO_SHOW = 0x40000000 # value is not displayed def build_counter(obj, instance, instance_index, counter): r''' Makes a fully resolved counter path. Counter names are formatted like this: ``\Processor(*)\% Processor Time`` The above breaks down like this: obj = 'Processor' instance = '*' counter = '% Processor Time' Args: obj (str): The top level object instance (str): The instance of the object instance_index (int): The index of the instance. 
Can usually be 0 counter (str): The name of the counter Returns: Counter: A Counter object with the path if valid Raises: CommandExecutionError: If the path is invalid ''' path = win32pdh.MakeCounterPath( (None, obj, instance, None, instance_index, counter), 0) if win32pdh.ValidatePath(path) is 0: return Counter(path, obj, instance, instance_index, counter) raise CommandExecutionError('Invalid counter specified: {0}'.format(path)) build_counter = staticmethod(build_counter) def __init__(self, path, obj, instance, index, counter): self.path = path self.obj = obj self.instance = instance self.index = index self.counter = counter self.handle = None self.info = None self.type = None def add_to_query(self, query): ''' Add the current path to the query Args: query (obj): The handle to the query to add the counter ''' self.handle = win32pdh.AddCounter(query, self.path) def get_info(self): ''' Get information about the counter .. note:: GetCounterInfo sometimes crashes in the wrapper code. Fewer crashes if this is called after sampling data. ''' if not self.info: ci = win32pdh.GetCounterInfo(self.handle, 0) self.info = { 'type': ci[0], 'version': ci[1], 'scale': ci[2], 'default_scale': ci[3], 'user_data': ci[4], 'query_user_data': ci[5], 'full_path': ci[6], 'machine_name': ci[7][0], 'object_name': ci[7][1], 'instance_name': ci[7][2], 'parent_instance': ci[7][3], 'instance_index': ci[7][4], 'counter_name': ci[7][5], 'explain_text': ci[8] } return self.info def value(self): ''' Return the counter value Returns: long: The counter value ''' (counter_type, value) = win32pdh.GetFormattedCounterValue( self.handle, win32pdh.PDH_FMT_DOUBLE) self.type = counter_type return value def type_string(self): ''' Returns the names of the flags that are set in the Type field It can be used to format the counter. 
''' type = self.get_info()['type'] type_list = [] for member in dir(self): if member.startswith("PERF_"): bit = getattr(self, member) if bit and bit & type: type_list.append(member[5:]) return type_list def __str__(self): return self.path def list_objects(): ''' Get a list of available counter objects on the system Returns: list: A list of counter objects ''' return sorted(win32pdh.EnumObjects(None, None, -1, 0)) def list_counters(obj): ''' Get a list of counters available for the object Args: obj (str): The name of the counter object. You can get a list of valid names using the ``list_objects`` function Returns: list: A list of counters available to the passed object ''' return win32pdh.EnumObjectItems(None, None, obj, -1, 0)[0] def list_instances(obj): ''' Get a list of instances available for the object Args: obj (str): The name of the counter object. You can get a list of valid names using the ``list_objects`` function Returns: list: A list of instances available to the passed object ''' return win32pdh.EnumObjectItems(None, None, obj, -1, 0)[1] def get_all_counters(obj, instance_list=None): ''' Get the values for all counters available to a Counter object Args: obj (str): The name of the counter object. You can get a list of valid names using the ``list_objects`` function instance_list (list): A list of instances to return. Use this to narrow down the counters that are returned. .. 
note:: ``_Total`` is returned as ``*`` ''' counters, instances_avail = win32pdh.EnumObjectItems(None, None, obj, -1, 0) if instance_list is None: instance_list = instances_avail if not isinstance(instance_list, list): instance_list = [instance_list] counter_list = [] for counter in counters: for instance in instance_list: instance = '*' if instance.lower() == '_total' else instance counter_list.append((obj, instance, counter)) else: # pylint: disable=useless-else-on-loop counter_list.append((obj, None, counter)) return get_counters(counter_list) if counter_list else {} def get_counters(counter_list): ''' Get the values for the passes list of counters Args: counter_list (list): A list of counters to lookup Returns: dict: A dictionary of counters and their values ''' if not isinstance(counter_list, list): raise CommandExecutionError('counter_list must be a list of tuples') try: # Start a Query instances query = win32pdh.OpenQuery() # Build the counters counters = build_counter_list(counter_list) # Add counters to the Query for counter in counters: counter.add_to_query(query) # https://docs.microsoft.com/en-us/windows/desktop/perfctrs/collecting-performance-data win32pdh.CollectQueryData(query) # The sleep here is required for counters that require more than 1 # reading time.sleep(1) win32pdh.CollectQueryData(query) ret = {} for counter in counters: try: ret.update({counter.path: counter.value()}) except pywintypes.error as exc: if exc.strerror == 'No data to return.': # Some counters are not active and will throw an error if # there is no data to return continue else: raise finally: win32pdh.CloseQuery(query) return ret def get_counter(obj, instance, counter): ''' Get the value of a single counter Args: obj (str): The name of the counter object. You can get a list of valid names using the ``list_objects`` function instance (str): The counter instance you wish to return. Get a list of instances using the ``list_instances`` function .. 
note:: ``_Total`` is returned as ``*`` counter (str): The name of the counter. Get a list of counters using the ``list_counters`` function ''' return get_counters([(obj, instance, counter)])
saltstack/salt
salt/utils/win_pdh.py
get_all_counters
python
def get_all_counters(obj, instance_list=None):
    '''
    Get the values for every counter available to a Counter object

    Args:

        obj (str):
            The name of the counter object. You can get a list of valid names
            using the ``list_objects`` function

        instance_list (list):
            A list of instances to return. Use this to narrow down the
            counters that are returned.

            .. note::
                ``_Total`` is returned as ``*``
    '''
    counter_names, available_instances = win32pdh.EnumObjectItems(
        None, None, obj, -1, 0)

    # No explicit instances requested: query every instance the object exposes
    if instance_list is None:
        instance_list = available_instances

    if not isinstance(instance_list, list):
        instance_list = [instance_list]

    to_query = []
    for name in counter_names:
        for inst in instance_list:
            if inst.lower() == '_total':
                inst = '*'
            to_query.append((obj, inst, name))
        # Also query each counter without an instance qualifier
        to_query.append((obj, None, name))

    if not to_query:
        return {}
    return get_counters(to_query)
Get the values for all counters available to a Counter object Args: obj (str): The name of the counter object. You can get a list of valid names using the ``list_objects`` function instance_list (list): A list of instances to return. Use this to narrow down the counters that are returned. .. note:: ``_Total`` is returned as ``*``
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/win_pdh.py#L337-L370
[ "def get_counters(counter_list):\n '''\n Get the values for the passes list of counters\n\n Args:\n counter_list (list):\n A list of counters to lookup\n\n Returns:\n dict: A dictionary of counters and their values\n '''\n if not isinstance(counter_list, list):\n raise CommandExecutionError('counter_list must be a list of tuples')\n\n try:\n # Start a Query instances\n query = win32pdh.OpenQuery()\n\n # Build the counters\n counters = build_counter_list(counter_list)\n\n # Add counters to the Query\n for counter in counters:\n counter.add_to_query(query)\n\n # https://docs.microsoft.com/en-us/windows/desktop/perfctrs/collecting-performance-data\n win32pdh.CollectQueryData(query)\n # The sleep here is required for counters that require more than 1\n # reading\n time.sleep(1)\n win32pdh.CollectQueryData(query)\n ret = {}\n\n for counter in counters:\n try:\n ret.update({counter.path: counter.value()})\n except pywintypes.error as exc:\n if exc.strerror == 'No data to return.':\n # Some counters are not active and will throw an error if\n # there is no data to return\n continue\n else:\n raise\n\n finally:\n win32pdh.CloseQuery(query)\n\n return ret\n" ]
# -*- coding: utf-8 -*- r''' Salt Util for getting system information with the Performance Data Helper (pdh). Counter information is gathered from current activity or log files. Usage: .. code-block:: python import salt.utils.win_pdh # Get a list of Counter objects salt.utils.win_pdh.list_objects() # Get a list of ``Processor`` instances salt.utils.win_pdh.list_instances('Processor') # Get a list of ``Processor`` counters salt.utils.win_pdh.list_counters('Processor') # Get the value of a single counter # \Processor(*)\% Processor Time salt.utils.win_pdh.get_counter('Processor', '*', '% Processor Time') # Get the values of multiple counters counter_list = [('Processor', '*', '% Processor Time'), ('System', None, 'Context Switches/sec'), ('Memory', None, 'Pages/sec'), ('Server Work Queues', '*', 'Queue Length')] salt.utils.win_pdh.get_counters(counter_list) # Get all counters for the Processor object salt.utils.win_pdh.get_all_counters('Processor') ''' # https://www.cac.cornell.edu/wiki/index.php?title=Performance_Data_Helper_in_Python_with_win32pdh # https://docs.microsoft.com/en-us/windows/desktop/perfctrs/using-the-pdh-functions-to-consume-counter-data # Import python libs from __future__ import absolute_import, unicode_literals import logging import time # Import 3rd party libs try: import pywintypes import win32pdh HAS_WINDOWS_MODULES = True except ImportError: HAS_WINDOWS_MODULES = False # Import salt libs import salt.utils.platform from salt.exceptions import CommandExecutionError log = logging.getLogger(__file__) # Define the virtual name __virtualname__ = 'pdh' def __virtual__(): ''' Only works on Windows systems with the PyWin32 ''' if not salt.utils.platform.is_windows(): return False, 'salt.utils.win_pdh: Requires Windows' if not HAS_WINDOWS_MODULES: return False, 'salt.utils.win_pdh: Missing required modules' return __virtualname__ class Counter(object): ''' Counter object Has enumerations and functions for working with counters ''' # The dwType field 
from GetCounterInfo returns the following, or'ed. # These come from WinPerf.h PERF_SIZE_DWORD = 0x00000000 PERF_SIZE_LARGE = 0x00000100 PERF_SIZE_ZERO = 0x00000200 # for Zero Length fields PERF_SIZE_VARIABLE_LEN = 0x00000300 # length is in the CounterLength field of the Counter Definition structure # select one of the following values to indicate the counter field usage PERF_TYPE_NUMBER = 0x00000000 # a number (not a counter) PERF_TYPE_COUNTER = 0x00000400 # an increasing numeric value PERF_TYPE_TEXT = 0x00000800 # a text field PERF_TYPE_ZERO = 0x00000C00 # displays a zero # If the PERF_TYPE_NUMBER field was selected, then select one of the # following to describe the Number PERF_NUMBER_HEX = 0x00000000 # display as HEX value PERF_NUMBER_DECIMAL = 0x00010000 # display as a decimal integer PERF_NUMBER_DEC_1000 = 0x00020000 # display as a decimal/1000 # If the PERF_TYPE_COUNTER value was selected then select one of the # following to indicate the type of counter PERF_COUNTER_VALUE = 0x00000000 # display counter value PERF_COUNTER_RATE = 0x00010000 # divide ctr / delta time PERF_COUNTER_FRACTION = 0x00020000 # divide ctr / base PERF_COUNTER_BASE = 0x00030000 # base value used in fractions PERF_COUNTER_ELAPSED = 0x00040000 # subtract counter from current time PERF_COUNTER_QUEUE_LEN = 0x00050000 # Use Queue len processing func. PERF_COUNTER_HISTOGRAM = 0x00060000 # Counter begins or ends a histogram # If the PERF_TYPE_TEXT value was selected, then select one of the # following to indicate the type of TEXT data. PERF_TEXT_UNICODE = 0x00000000 # type of text in text field PERF_TEXT_ASCII = 0x00010000 # ASCII using the CodePage field # Timer SubTypes PERF_TIMER_TICK = 0x00000000 # use system perf. 
freq for base PERF_TIMER_100NS = 0x00100000 # use 100 NS timer time base units PERF_OBJECT_TIMER = 0x00200000 # use the object timer freq # Any types that have calculations performed can use one or more of the # following calculation modification flags listed here PERF_DELTA_COUNTER = 0x00400000 # compute difference first PERF_DELTA_BASE = 0x00800000 # compute base diff as well PERF_INVERSE_COUNTER = 0x01000000 # show as 1.00-value (assumes: PERF_MULTI_COUNTER = 0x02000000 # sum of multiple instances # Select one of the following values to indicate the display suffix (if any) PERF_DISPLAY_NO_SUFFIX = 0x00000000 # no suffix PERF_DISPLAY_PER_SEC = 0x10000000 # "/sec" PERF_DISPLAY_PERCENT = 0x20000000 # "%" PERF_DISPLAY_SECONDS = 0x30000000 # "secs" PERF_DISPLAY_NO_SHOW = 0x40000000 # value is not displayed def build_counter(obj, instance, instance_index, counter): r''' Makes a fully resolved counter path. Counter names are formatted like this: ``\Processor(*)\% Processor Time`` The above breaks down like this: obj = 'Processor' instance = '*' counter = '% Processor Time' Args: obj (str): The top level object instance (str): The instance of the object instance_index (int): The index of the instance. 
Can usually be 0 counter (str): The name of the counter Returns: Counter: A Counter object with the path if valid Raises: CommandExecutionError: If the path is invalid ''' path = win32pdh.MakeCounterPath( (None, obj, instance, None, instance_index, counter), 0) if win32pdh.ValidatePath(path) is 0: return Counter(path, obj, instance, instance_index, counter) raise CommandExecutionError('Invalid counter specified: {0}'.format(path)) build_counter = staticmethod(build_counter) def __init__(self, path, obj, instance, index, counter): self.path = path self.obj = obj self.instance = instance self.index = index self.counter = counter self.handle = None self.info = None self.type = None def add_to_query(self, query): ''' Add the current path to the query Args: query (obj): The handle to the query to add the counter ''' self.handle = win32pdh.AddCounter(query, self.path) def get_info(self): ''' Get information about the counter .. note:: GetCounterInfo sometimes crashes in the wrapper code. Fewer crashes if this is called after sampling data. ''' if not self.info: ci = win32pdh.GetCounterInfo(self.handle, 0) self.info = { 'type': ci[0], 'version': ci[1], 'scale': ci[2], 'default_scale': ci[3], 'user_data': ci[4], 'query_user_data': ci[5], 'full_path': ci[6], 'machine_name': ci[7][0], 'object_name': ci[7][1], 'instance_name': ci[7][2], 'parent_instance': ci[7][3], 'instance_index': ci[7][4], 'counter_name': ci[7][5], 'explain_text': ci[8] } return self.info def value(self): ''' Return the counter value Returns: long: The counter value ''' (counter_type, value) = win32pdh.GetFormattedCounterValue( self.handle, win32pdh.PDH_FMT_DOUBLE) self.type = counter_type return value def type_string(self): ''' Returns the names of the flags that are set in the Type field It can be used to format the counter. 
''' type = self.get_info()['type'] type_list = [] for member in dir(self): if member.startswith("PERF_"): bit = getattr(self, member) if bit and bit & type: type_list.append(member[5:]) return type_list def __str__(self): return self.path def list_objects(): ''' Get a list of available counter objects on the system Returns: list: A list of counter objects ''' return sorted(win32pdh.EnumObjects(None, None, -1, 0)) def list_counters(obj): ''' Get a list of counters available for the object Args: obj (str): The name of the counter object. You can get a list of valid names using the ``list_objects`` function Returns: list: A list of counters available to the passed object ''' return win32pdh.EnumObjectItems(None, None, obj, -1, 0)[0] def list_instances(obj): ''' Get a list of instances available for the object Args: obj (str): The name of the counter object. You can get a list of valid names using the ``list_objects`` function Returns: list: A list of instances available to the passed object ''' return win32pdh.EnumObjectItems(None, None, obj, -1, 0)[1] def build_counter_list(counter_list): r''' Create a list of Counter objects to be used in the pdh query Args: counter_list (list): A list of tuples containing counter information. Each tuple should contain the object, instance, and counter name. For example, to get the ``% Processor Time`` counter for all Processors on the system (``\Processor(*)\% Processor Time``) you would pass a tuple like this: ``` counter_list = [('Processor', '*', '% Processor Time')] ``` If there is no ``instance`` for the counter, pass ``None`` Multiple counters can be passed like so: ``` counter_list = [('Processor', '*', '% Processor Time'), ('System', None, 'Context Switches/sec')] ``` .. 
note:: Invalid counters are ignored Returns: list: A list of Counter objects ''' counters = [] index = 0 for obj, instance, counter_name in counter_list: try: counter = Counter.build_counter(obj, instance, index, counter_name) index += 1 counters.append(counter) except CommandExecutionError as exc: # Not a valid counter log.debug(exc.strerror) continue return counters def get_counters(counter_list): ''' Get the values for the passes list of counters Args: counter_list (list): A list of counters to lookup Returns: dict: A dictionary of counters and their values ''' if not isinstance(counter_list, list): raise CommandExecutionError('counter_list must be a list of tuples') try: # Start a Query instances query = win32pdh.OpenQuery() # Build the counters counters = build_counter_list(counter_list) # Add counters to the Query for counter in counters: counter.add_to_query(query) # https://docs.microsoft.com/en-us/windows/desktop/perfctrs/collecting-performance-data win32pdh.CollectQueryData(query) # The sleep here is required for counters that require more than 1 # reading time.sleep(1) win32pdh.CollectQueryData(query) ret = {} for counter in counters: try: ret.update({counter.path: counter.value()}) except pywintypes.error as exc: if exc.strerror == 'No data to return.': # Some counters are not active and will throw an error if # there is no data to return continue else: raise finally: win32pdh.CloseQuery(query) return ret def get_counter(obj, instance, counter): ''' Get the value of a single counter Args: obj (str): The name of the counter object. You can get a list of valid names using the ``list_objects`` function instance (str): The counter instance you wish to return. Get a list of instances using the ``list_instances`` function .. note:: ``_Total`` is returned as ``*`` counter (str): The name of the counter. Get a list of counters using the ``list_counters`` function ''' return get_counters([(obj, instance, counter)])
saltstack/salt
salt/utils/win_pdh.py
get_counters
python
def get_counters(counter_list):
    '''
    Get the values for the passed list of counters

    Args:
        counter_list (list):
            A list of counters to lookup

    Returns:
        dict: A dictionary of counters and their values

    Raises:
        CommandExecutionError: If ``counter_list`` is not a list
    '''
    if not isinstance(counter_list, list):
        raise CommandExecutionError('counter_list must be a list of tuples')

    # Open the query BEFORE entering try/finally: if OpenQuery itself fails,
    # the finally block must not reference an unbound ``query`` (which would
    # raise NameError and mask the original error)
    query = win32pdh.OpenQuery()
    try:
        # Build the counters
        counters = build_counter_list(counter_list)

        # Add counters to the Query
        for counter in counters:
            counter.add_to_query(query)

        # https://docs.microsoft.com/en-us/windows/desktop/perfctrs/collecting-performance-data
        win32pdh.CollectQueryData(query)
        # The sleep here is required for counters that require more than 1
        # reading
        time.sleep(1)
        win32pdh.CollectQueryData(query)

        ret = {}
        for counter in counters:
            try:
                ret.update({counter.path: counter.value()})
            except pywintypes.error as exc:
                if exc.strerror == 'No data to return.':
                    # Some counters are not active and will throw an error if
                    # there is no data to return
                    continue
                raise
    finally:
        win32pdh.CloseQuery(query)

    return ret
Get the values for the passed list of counters Args: counter_list (list): A list of counters to lookup Returns: dict: A dictionary of counters and their values
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/win_pdh.py#L373-L420
[ "def build_counter_list(counter_list):\n r'''\n Create a list of Counter objects to be used in the pdh query\n\n Args:\n counter_list (list):\n A list of tuples containing counter information. Each tuple should\n contain the object, instance, and counter name. For example, to\n get the ``% Processor Time`` counter for all Processors on the\n system (``\\Processor(*)\\% Processor Time``) you would pass a tuple\n like this:\n\n ```\n counter_list = [('Processor', '*', '% Processor Time')]\n ```\n\n If there is no ``instance`` for the counter, pass ``None``\n\n Multiple counters can be passed like so:\n\n ```\n counter_list = [('Processor', '*', '% Processor Time'),\n ('System', None, 'Context Switches/sec')]\n ```\n\n .. note::\n Invalid counters are ignored\n\n Returns:\n list: A list of Counter objects\n '''\n counters = []\n index = 0\n for obj, instance, counter_name in counter_list:\n try:\n counter = Counter.build_counter(obj, instance, index, counter_name)\n index += 1\n counters.append(counter)\n except CommandExecutionError as exc:\n # Not a valid counter\n log.debug(exc.strerror)\n continue\n return counters\n" ]
# -*- coding: utf-8 -*- r''' Salt Util for getting system information with the Performance Data Helper (pdh). Counter information is gathered from current activity or log files. Usage: .. code-block:: python import salt.utils.win_pdh # Get a list of Counter objects salt.utils.win_pdh.list_objects() # Get a list of ``Processor`` instances salt.utils.win_pdh.list_instances('Processor') # Get a list of ``Processor`` counters salt.utils.win_pdh.list_counters('Processor') # Get the value of a single counter # \Processor(*)\% Processor Time salt.utils.win_pdh.get_counter('Processor', '*', '% Processor Time') # Get the values of multiple counters counter_list = [('Processor', '*', '% Processor Time'), ('System', None, 'Context Switches/sec'), ('Memory', None, 'Pages/sec'), ('Server Work Queues', '*', 'Queue Length')] salt.utils.win_pdh.get_counters(counter_list) # Get all counters for the Processor object salt.utils.win_pdh.get_all_counters('Processor') ''' # https://www.cac.cornell.edu/wiki/index.php?title=Performance_Data_Helper_in_Python_with_win32pdh # https://docs.microsoft.com/en-us/windows/desktop/perfctrs/using-the-pdh-functions-to-consume-counter-data # Import python libs from __future__ import absolute_import, unicode_literals import logging import time # Import 3rd party libs try: import pywintypes import win32pdh HAS_WINDOWS_MODULES = True except ImportError: HAS_WINDOWS_MODULES = False # Import salt libs import salt.utils.platform from salt.exceptions import CommandExecutionError log = logging.getLogger(__file__) # Define the virtual name __virtualname__ = 'pdh' def __virtual__(): ''' Only works on Windows systems with the PyWin32 ''' if not salt.utils.platform.is_windows(): return False, 'salt.utils.win_pdh: Requires Windows' if not HAS_WINDOWS_MODULES: return False, 'salt.utils.win_pdh: Missing required modules' return __virtualname__ class Counter(object): ''' Counter object Has enumerations and functions for working with counters ''' # The dwType field 
from GetCounterInfo returns the following, or'ed. # These come from WinPerf.h PERF_SIZE_DWORD = 0x00000000 PERF_SIZE_LARGE = 0x00000100 PERF_SIZE_ZERO = 0x00000200 # for Zero Length fields PERF_SIZE_VARIABLE_LEN = 0x00000300 # length is in the CounterLength field of the Counter Definition structure # select one of the following values to indicate the counter field usage PERF_TYPE_NUMBER = 0x00000000 # a number (not a counter) PERF_TYPE_COUNTER = 0x00000400 # an increasing numeric value PERF_TYPE_TEXT = 0x00000800 # a text field PERF_TYPE_ZERO = 0x00000C00 # displays a zero # If the PERF_TYPE_NUMBER field was selected, then select one of the # following to describe the Number PERF_NUMBER_HEX = 0x00000000 # display as HEX value PERF_NUMBER_DECIMAL = 0x00010000 # display as a decimal integer PERF_NUMBER_DEC_1000 = 0x00020000 # display as a decimal/1000 # If the PERF_TYPE_COUNTER value was selected then select one of the # following to indicate the type of counter PERF_COUNTER_VALUE = 0x00000000 # display counter value PERF_COUNTER_RATE = 0x00010000 # divide ctr / delta time PERF_COUNTER_FRACTION = 0x00020000 # divide ctr / base PERF_COUNTER_BASE = 0x00030000 # base value used in fractions PERF_COUNTER_ELAPSED = 0x00040000 # subtract counter from current time PERF_COUNTER_QUEUE_LEN = 0x00050000 # Use Queue len processing func. PERF_COUNTER_HISTOGRAM = 0x00060000 # Counter begins or ends a histogram # If the PERF_TYPE_TEXT value was selected, then select one of the # following to indicate the type of TEXT data. PERF_TEXT_UNICODE = 0x00000000 # type of text in text field PERF_TEXT_ASCII = 0x00010000 # ASCII using the CodePage field # Timer SubTypes PERF_TIMER_TICK = 0x00000000 # use system perf. 
freq for base PERF_TIMER_100NS = 0x00100000 # use 100 NS timer time base units PERF_OBJECT_TIMER = 0x00200000 # use the object timer freq # Any types that have calculations performed can use one or more of the # following calculation modification flags listed here PERF_DELTA_COUNTER = 0x00400000 # compute difference first PERF_DELTA_BASE = 0x00800000 # compute base diff as well PERF_INVERSE_COUNTER = 0x01000000 # show as 1.00-value (assumes: PERF_MULTI_COUNTER = 0x02000000 # sum of multiple instances # Select one of the following values to indicate the display suffix (if any) PERF_DISPLAY_NO_SUFFIX = 0x00000000 # no suffix PERF_DISPLAY_PER_SEC = 0x10000000 # "/sec" PERF_DISPLAY_PERCENT = 0x20000000 # "%" PERF_DISPLAY_SECONDS = 0x30000000 # "secs" PERF_DISPLAY_NO_SHOW = 0x40000000 # value is not displayed def build_counter(obj, instance, instance_index, counter): r''' Makes a fully resolved counter path. Counter names are formatted like this: ``\Processor(*)\% Processor Time`` The above breaks down like this: obj = 'Processor' instance = '*' counter = '% Processor Time' Args: obj (str): The top level object instance (str): The instance of the object instance_index (int): The index of the instance. 
Can usually be 0 counter (str): The name of the counter Returns: Counter: A Counter object with the path if valid Raises: CommandExecutionError: If the path is invalid ''' path = win32pdh.MakeCounterPath( (None, obj, instance, None, instance_index, counter), 0) if win32pdh.ValidatePath(path) is 0: return Counter(path, obj, instance, instance_index, counter) raise CommandExecutionError('Invalid counter specified: {0}'.format(path)) build_counter = staticmethod(build_counter) def __init__(self, path, obj, instance, index, counter): self.path = path self.obj = obj self.instance = instance self.index = index self.counter = counter self.handle = None self.info = None self.type = None def add_to_query(self, query): ''' Add the current path to the query Args: query (obj): The handle to the query to add the counter ''' self.handle = win32pdh.AddCounter(query, self.path) def get_info(self): ''' Get information about the counter .. note:: GetCounterInfo sometimes crashes in the wrapper code. Fewer crashes if this is called after sampling data. ''' if not self.info: ci = win32pdh.GetCounterInfo(self.handle, 0) self.info = { 'type': ci[0], 'version': ci[1], 'scale': ci[2], 'default_scale': ci[3], 'user_data': ci[4], 'query_user_data': ci[5], 'full_path': ci[6], 'machine_name': ci[7][0], 'object_name': ci[7][1], 'instance_name': ci[7][2], 'parent_instance': ci[7][3], 'instance_index': ci[7][4], 'counter_name': ci[7][5], 'explain_text': ci[8] } return self.info def value(self): ''' Return the counter value Returns: long: The counter value ''' (counter_type, value) = win32pdh.GetFormattedCounterValue( self.handle, win32pdh.PDH_FMT_DOUBLE) self.type = counter_type return value def type_string(self): ''' Returns the names of the flags that are set in the Type field It can be used to format the counter. 
''' type = self.get_info()['type'] type_list = [] for member in dir(self): if member.startswith("PERF_"): bit = getattr(self, member) if bit and bit & type: type_list.append(member[5:]) return type_list def __str__(self): return self.path def list_objects(): ''' Get a list of available counter objects on the system Returns: list: A list of counter objects ''' return sorted(win32pdh.EnumObjects(None, None, -1, 0)) def list_counters(obj): ''' Get a list of counters available for the object Args: obj (str): The name of the counter object. You can get a list of valid names using the ``list_objects`` function Returns: list: A list of counters available to the passed object ''' return win32pdh.EnumObjectItems(None, None, obj, -1, 0)[0] def list_instances(obj): ''' Get a list of instances available for the object Args: obj (str): The name of the counter object. You can get a list of valid names using the ``list_objects`` function Returns: list: A list of instances available to the passed object ''' return win32pdh.EnumObjectItems(None, None, obj, -1, 0)[1] def build_counter_list(counter_list): r''' Create a list of Counter objects to be used in the pdh query Args: counter_list (list): A list of tuples containing counter information. Each tuple should contain the object, instance, and counter name. For example, to get the ``% Processor Time`` counter for all Processors on the system (``\Processor(*)\% Processor Time``) you would pass a tuple like this: ``` counter_list = [('Processor', '*', '% Processor Time')] ``` If there is no ``instance`` for the counter, pass ``None`` Multiple counters can be passed like so: ``` counter_list = [('Processor', '*', '% Processor Time'), ('System', None, 'Context Switches/sec')] ``` .. 
note:: Invalid counters are ignored Returns: list: A list of Counter objects ''' counters = [] index = 0 for obj, instance, counter_name in counter_list: try: counter = Counter.build_counter(obj, instance, index, counter_name) index += 1 counters.append(counter) except CommandExecutionError as exc: # Not a valid counter log.debug(exc.strerror) continue return counters def get_all_counters(obj, instance_list=None): ''' Get the values for all counters available to a Counter object Args: obj (str): The name of the counter object. You can get a list of valid names using the ``list_objects`` function instance_list (list): A list of instances to return. Use this to narrow down the counters that are returned. .. note:: ``_Total`` is returned as ``*`` ''' counters, instances_avail = win32pdh.EnumObjectItems(None, None, obj, -1, 0) if instance_list is None: instance_list = instances_avail if not isinstance(instance_list, list): instance_list = [instance_list] counter_list = [] for counter in counters: for instance in instance_list: instance = '*' if instance.lower() == '_total' else instance counter_list.append((obj, instance, counter)) else: # pylint: disable=useless-else-on-loop counter_list.append((obj, None, counter)) return get_counters(counter_list) if counter_list else {} def get_counter(obj, instance, counter): ''' Get the value of a single counter Args: obj (str): The name of the counter object. You can get a list of valid names using the ``list_objects`` function instance (str): The counter instance you wish to return. Get a list of instances using the ``list_instances`` function .. note:: ``_Total`` is returned as ``*`` counter (str): The name of the counter. Get a list of counters using the ``list_counters`` function ''' return get_counters([(obj, instance, counter)])
saltstack/salt
salt/utils/win_pdh.py
Counter.build_counter
python
def build_counter(obj, instance, instance_index, counter): r''' Makes a fully resolved counter path. Counter names are formatted like this: ``\Processor(*)\% Processor Time`` The above breaks down like this: obj = 'Processor' instance = '*' counter = '% Processor Time' Args: obj (str): The top level object instance (str): The instance of the object instance_index (int): The index of the instance. Can usually be 0 counter (str): The name of the counter Returns: Counter: A Counter object with the path if valid Raises: CommandExecutionError: If the path is invalid ''' path = win32pdh.MakeCounterPath( (None, obj, instance, None, instance_index, counter), 0) if win32pdh.ValidatePath(path) is 0: return Counter(path, obj, instance, instance_index, counter) raise CommandExecutionError('Invalid counter specified: {0}'.format(path))
r''' Makes a fully resolved counter path. Counter names are formatted like this: ``\Processor(*)\% Processor Time`` The above breaks down like this: obj = 'Processor' instance = '*' counter = '% Processor Time' Args: obj (str): The top level object instance (str): The instance of the object instance_index (int): The index of the instance. Can usually be 0 counter (str): The name of the counter Returns: Counter: A Counter object with the path if valid Raises: CommandExecutionError: If the path is invalid
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/win_pdh.py#L132-L169
null
class Counter(object): ''' Counter object Has enumerations and functions for working with counters ''' # The dwType field from GetCounterInfo returns the following, or'ed. # These come from WinPerf.h PERF_SIZE_DWORD = 0x00000000 PERF_SIZE_LARGE = 0x00000100 PERF_SIZE_ZERO = 0x00000200 # for Zero Length fields PERF_SIZE_VARIABLE_LEN = 0x00000300 # length is in the CounterLength field of the Counter Definition structure # select one of the following values to indicate the counter field usage PERF_TYPE_NUMBER = 0x00000000 # a number (not a counter) PERF_TYPE_COUNTER = 0x00000400 # an increasing numeric value PERF_TYPE_TEXT = 0x00000800 # a text field PERF_TYPE_ZERO = 0x00000C00 # displays a zero # If the PERF_TYPE_NUMBER field was selected, then select one of the # following to describe the Number PERF_NUMBER_HEX = 0x00000000 # display as HEX value PERF_NUMBER_DECIMAL = 0x00010000 # display as a decimal integer PERF_NUMBER_DEC_1000 = 0x00020000 # display as a decimal/1000 # If the PERF_TYPE_COUNTER value was selected then select one of the # following to indicate the type of counter PERF_COUNTER_VALUE = 0x00000000 # display counter value PERF_COUNTER_RATE = 0x00010000 # divide ctr / delta time PERF_COUNTER_FRACTION = 0x00020000 # divide ctr / base PERF_COUNTER_BASE = 0x00030000 # base value used in fractions PERF_COUNTER_ELAPSED = 0x00040000 # subtract counter from current time PERF_COUNTER_QUEUE_LEN = 0x00050000 # Use Queue len processing func. PERF_COUNTER_HISTOGRAM = 0x00060000 # Counter begins or ends a histogram # If the PERF_TYPE_TEXT value was selected, then select one of the # following to indicate the type of TEXT data. PERF_TEXT_UNICODE = 0x00000000 # type of text in text field PERF_TEXT_ASCII = 0x00010000 # ASCII using the CodePage field # Timer SubTypes PERF_TIMER_TICK = 0x00000000 # use system perf. 
freq for base PERF_TIMER_100NS = 0x00100000 # use 100 NS timer time base units PERF_OBJECT_TIMER = 0x00200000 # use the object timer freq # Any types that have calculations performed can use one or more of the # following calculation modification flags listed here PERF_DELTA_COUNTER = 0x00400000 # compute difference first PERF_DELTA_BASE = 0x00800000 # compute base diff as well PERF_INVERSE_COUNTER = 0x01000000 # show as 1.00-value (assumes: PERF_MULTI_COUNTER = 0x02000000 # sum of multiple instances # Select one of the following values to indicate the display suffix (if any) PERF_DISPLAY_NO_SUFFIX = 0x00000000 # no suffix PERF_DISPLAY_PER_SEC = 0x10000000 # "/sec" PERF_DISPLAY_PERCENT = 0x20000000 # "%" PERF_DISPLAY_SECONDS = 0x30000000 # "secs" PERF_DISPLAY_NO_SHOW = 0x40000000 # value is not displayed build_counter = staticmethod(build_counter) def __init__(self, path, obj, instance, index, counter): self.path = path self.obj = obj self.instance = instance self.index = index self.counter = counter self.handle = None self.info = None self.type = None def add_to_query(self, query): ''' Add the current path to the query Args: query (obj): The handle to the query to add the counter ''' self.handle = win32pdh.AddCounter(query, self.path) def get_info(self): ''' Get information about the counter .. note:: GetCounterInfo sometimes crashes in the wrapper code. Fewer crashes if this is called after sampling data. 
''' if not self.info: ci = win32pdh.GetCounterInfo(self.handle, 0) self.info = { 'type': ci[0], 'version': ci[1], 'scale': ci[2], 'default_scale': ci[3], 'user_data': ci[4], 'query_user_data': ci[5], 'full_path': ci[6], 'machine_name': ci[7][0], 'object_name': ci[7][1], 'instance_name': ci[7][2], 'parent_instance': ci[7][3], 'instance_index': ci[7][4], 'counter_name': ci[7][5], 'explain_text': ci[8] } return self.info def value(self): ''' Return the counter value Returns: long: The counter value ''' (counter_type, value) = win32pdh.GetFormattedCounterValue( self.handle, win32pdh.PDH_FMT_DOUBLE) self.type = counter_type return value def type_string(self): ''' Returns the names of the flags that are set in the Type field It can be used to format the counter. ''' type = self.get_info()['type'] type_list = [] for member in dir(self): if member.startswith("PERF_"): bit = getattr(self, member) if bit and bit & type: type_list.append(member[5:]) return type_list def __str__(self): return self.path
saltstack/salt
salt/utils/win_pdh.py
Counter.add_to_query
python
def add_to_query(self, query): ''' Add the current path to the query Args: query (obj): The handle to the query to add the counter ''' self.handle = win32pdh.AddCounter(query, self.path)
Add the current path to the query Args: query (obj): The handle to the query to add the counter
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/win_pdh.py#L183-L191
null
class Counter(object): ''' Counter object Has enumerations and functions for working with counters ''' # The dwType field from GetCounterInfo returns the following, or'ed. # These come from WinPerf.h PERF_SIZE_DWORD = 0x00000000 PERF_SIZE_LARGE = 0x00000100 PERF_SIZE_ZERO = 0x00000200 # for Zero Length fields PERF_SIZE_VARIABLE_LEN = 0x00000300 # length is in the CounterLength field of the Counter Definition structure # select one of the following values to indicate the counter field usage PERF_TYPE_NUMBER = 0x00000000 # a number (not a counter) PERF_TYPE_COUNTER = 0x00000400 # an increasing numeric value PERF_TYPE_TEXT = 0x00000800 # a text field PERF_TYPE_ZERO = 0x00000C00 # displays a zero # If the PERF_TYPE_NUMBER field was selected, then select one of the # following to describe the Number PERF_NUMBER_HEX = 0x00000000 # display as HEX value PERF_NUMBER_DECIMAL = 0x00010000 # display as a decimal integer PERF_NUMBER_DEC_1000 = 0x00020000 # display as a decimal/1000 # If the PERF_TYPE_COUNTER value was selected then select one of the # following to indicate the type of counter PERF_COUNTER_VALUE = 0x00000000 # display counter value PERF_COUNTER_RATE = 0x00010000 # divide ctr / delta time PERF_COUNTER_FRACTION = 0x00020000 # divide ctr / base PERF_COUNTER_BASE = 0x00030000 # base value used in fractions PERF_COUNTER_ELAPSED = 0x00040000 # subtract counter from current time PERF_COUNTER_QUEUE_LEN = 0x00050000 # Use Queue len processing func. PERF_COUNTER_HISTOGRAM = 0x00060000 # Counter begins or ends a histogram # If the PERF_TYPE_TEXT value was selected, then select one of the # following to indicate the type of TEXT data. PERF_TEXT_UNICODE = 0x00000000 # type of text in text field PERF_TEXT_ASCII = 0x00010000 # ASCII using the CodePage field # Timer SubTypes PERF_TIMER_TICK = 0x00000000 # use system perf. 
freq for base PERF_TIMER_100NS = 0x00100000 # use 100 NS timer time base units PERF_OBJECT_TIMER = 0x00200000 # use the object timer freq # Any types that have calculations performed can use one or more of the # following calculation modification flags listed here PERF_DELTA_COUNTER = 0x00400000 # compute difference first PERF_DELTA_BASE = 0x00800000 # compute base diff as well PERF_INVERSE_COUNTER = 0x01000000 # show as 1.00-value (assumes: PERF_MULTI_COUNTER = 0x02000000 # sum of multiple instances # Select one of the following values to indicate the display suffix (if any) PERF_DISPLAY_NO_SUFFIX = 0x00000000 # no suffix PERF_DISPLAY_PER_SEC = 0x10000000 # "/sec" PERF_DISPLAY_PERCENT = 0x20000000 # "%" PERF_DISPLAY_SECONDS = 0x30000000 # "secs" PERF_DISPLAY_NO_SHOW = 0x40000000 # value is not displayed def build_counter(obj, instance, instance_index, counter): r''' Makes a fully resolved counter path. Counter names are formatted like this: ``\Processor(*)\% Processor Time`` The above breaks down like this: obj = 'Processor' instance = '*' counter = '% Processor Time' Args: obj (str): The top level object instance (str): The instance of the object instance_index (int): The index of the instance. Can usually be 0 counter (str): The name of the counter Returns: Counter: A Counter object with the path if valid Raises: CommandExecutionError: If the path is invalid ''' path = win32pdh.MakeCounterPath( (None, obj, instance, None, instance_index, counter), 0) if win32pdh.ValidatePath(path) is 0: return Counter(path, obj, instance, instance_index, counter) raise CommandExecutionError('Invalid counter specified: {0}'.format(path)) build_counter = staticmethod(build_counter) def __init__(self, path, obj, instance, index, counter): self.path = path self.obj = obj self.instance = instance self.index = index self.counter = counter self.handle = None self.info = None self.type = None def get_info(self): ''' Get information about the counter .. 
note:: GetCounterInfo sometimes crashes in the wrapper code. Fewer crashes if this is called after sampling data. ''' if not self.info: ci = win32pdh.GetCounterInfo(self.handle, 0) self.info = { 'type': ci[0], 'version': ci[1], 'scale': ci[2], 'default_scale': ci[3], 'user_data': ci[4], 'query_user_data': ci[5], 'full_path': ci[6], 'machine_name': ci[7][0], 'object_name': ci[7][1], 'instance_name': ci[7][2], 'parent_instance': ci[7][3], 'instance_index': ci[7][4], 'counter_name': ci[7][5], 'explain_text': ci[8] } return self.info def value(self): ''' Return the counter value Returns: long: The counter value ''' (counter_type, value) = win32pdh.GetFormattedCounterValue( self.handle, win32pdh.PDH_FMT_DOUBLE) self.type = counter_type return value def type_string(self): ''' Returns the names of the flags that are set in the Type field It can be used to format the counter. ''' type = self.get_info()['type'] type_list = [] for member in dir(self): if member.startswith("PERF_"): bit = getattr(self, member) if bit and bit & type: type_list.append(member[5:]) return type_list def __str__(self): return self.path
saltstack/salt
salt/utils/win_pdh.py
Counter.get_info
python
def get_info(self): ''' Get information about the counter .. note:: GetCounterInfo sometimes crashes in the wrapper code. Fewer crashes if this is called after sampling data. ''' if not self.info: ci = win32pdh.GetCounterInfo(self.handle, 0) self.info = { 'type': ci[0], 'version': ci[1], 'scale': ci[2], 'default_scale': ci[3], 'user_data': ci[4], 'query_user_data': ci[5], 'full_path': ci[6], 'machine_name': ci[7][0], 'object_name': ci[7][1], 'instance_name': ci[7][2], 'parent_instance': ci[7][3], 'instance_index': ci[7][4], 'counter_name': ci[7][5], 'explain_text': ci[8] } return self.info
Get information about the counter .. note:: GetCounterInfo sometimes crashes in the wrapper code. Fewer crashes if this is called after sampling data.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/win_pdh.py#L193-L219
null
class Counter(object): ''' Counter object Has enumerations and functions for working with counters ''' # The dwType field from GetCounterInfo returns the following, or'ed. # These come from WinPerf.h PERF_SIZE_DWORD = 0x00000000 PERF_SIZE_LARGE = 0x00000100 PERF_SIZE_ZERO = 0x00000200 # for Zero Length fields PERF_SIZE_VARIABLE_LEN = 0x00000300 # length is in the CounterLength field of the Counter Definition structure # select one of the following values to indicate the counter field usage PERF_TYPE_NUMBER = 0x00000000 # a number (not a counter) PERF_TYPE_COUNTER = 0x00000400 # an increasing numeric value PERF_TYPE_TEXT = 0x00000800 # a text field PERF_TYPE_ZERO = 0x00000C00 # displays a zero # If the PERF_TYPE_NUMBER field was selected, then select one of the # following to describe the Number PERF_NUMBER_HEX = 0x00000000 # display as HEX value PERF_NUMBER_DECIMAL = 0x00010000 # display as a decimal integer PERF_NUMBER_DEC_1000 = 0x00020000 # display as a decimal/1000 # If the PERF_TYPE_COUNTER value was selected then select one of the # following to indicate the type of counter PERF_COUNTER_VALUE = 0x00000000 # display counter value PERF_COUNTER_RATE = 0x00010000 # divide ctr / delta time PERF_COUNTER_FRACTION = 0x00020000 # divide ctr / base PERF_COUNTER_BASE = 0x00030000 # base value used in fractions PERF_COUNTER_ELAPSED = 0x00040000 # subtract counter from current time PERF_COUNTER_QUEUE_LEN = 0x00050000 # Use Queue len processing func. PERF_COUNTER_HISTOGRAM = 0x00060000 # Counter begins or ends a histogram # If the PERF_TYPE_TEXT value was selected, then select one of the # following to indicate the type of TEXT data. PERF_TEXT_UNICODE = 0x00000000 # type of text in text field PERF_TEXT_ASCII = 0x00010000 # ASCII using the CodePage field # Timer SubTypes PERF_TIMER_TICK = 0x00000000 # use system perf. 
freq for base PERF_TIMER_100NS = 0x00100000 # use 100 NS timer time base units PERF_OBJECT_TIMER = 0x00200000 # use the object timer freq # Any types that have calculations performed can use one or more of the # following calculation modification flags listed here PERF_DELTA_COUNTER = 0x00400000 # compute difference first PERF_DELTA_BASE = 0x00800000 # compute base diff as well PERF_INVERSE_COUNTER = 0x01000000 # show as 1.00-value (assumes: PERF_MULTI_COUNTER = 0x02000000 # sum of multiple instances # Select one of the following values to indicate the display suffix (if any) PERF_DISPLAY_NO_SUFFIX = 0x00000000 # no suffix PERF_DISPLAY_PER_SEC = 0x10000000 # "/sec" PERF_DISPLAY_PERCENT = 0x20000000 # "%" PERF_DISPLAY_SECONDS = 0x30000000 # "secs" PERF_DISPLAY_NO_SHOW = 0x40000000 # value is not displayed def build_counter(obj, instance, instance_index, counter): r''' Makes a fully resolved counter path. Counter names are formatted like this: ``\Processor(*)\% Processor Time`` The above breaks down like this: obj = 'Processor' instance = '*' counter = '% Processor Time' Args: obj (str): The top level object instance (str): The instance of the object instance_index (int): The index of the instance. 
Can usually be 0 counter (str): The name of the counter Returns: Counter: A Counter object with the path if valid Raises: CommandExecutionError: If the path is invalid ''' path = win32pdh.MakeCounterPath( (None, obj, instance, None, instance_index, counter), 0) if win32pdh.ValidatePath(path) is 0: return Counter(path, obj, instance, instance_index, counter) raise CommandExecutionError('Invalid counter specified: {0}'.format(path)) build_counter = staticmethod(build_counter) def __init__(self, path, obj, instance, index, counter): self.path = path self.obj = obj self.instance = instance self.index = index self.counter = counter self.handle = None self.info = None self.type = None def add_to_query(self, query): ''' Add the current path to the query Args: query (obj): The handle to the query to add the counter ''' self.handle = win32pdh.AddCounter(query, self.path) def value(self): ''' Return the counter value Returns: long: The counter value ''' (counter_type, value) = win32pdh.GetFormattedCounterValue( self.handle, win32pdh.PDH_FMT_DOUBLE) self.type = counter_type return value def type_string(self): ''' Returns the names of the flags that are set in the Type field It can be used to format the counter. ''' type = self.get_info()['type'] type_list = [] for member in dir(self): if member.startswith("PERF_"): bit = getattr(self, member) if bit and bit & type: type_list.append(member[5:]) return type_list def __str__(self): return self.path
saltstack/salt
salt/utils/win_pdh.py
Counter.value
python
def value(self): ''' Return the counter value Returns: long: The counter value ''' (counter_type, value) = win32pdh.GetFormattedCounterValue( self.handle, win32pdh.PDH_FMT_DOUBLE) self.type = counter_type return value
Return the counter value Returns: long: The counter value
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/win_pdh.py#L221-L231
null
class Counter(object): ''' Counter object Has enumerations and functions for working with counters ''' # The dwType field from GetCounterInfo returns the following, or'ed. # These come from WinPerf.h PERF_SIZE_DWORD = 0x00000000 PERF_SIZE_LARGE = 0x00000100 PERF_SIZE_ZERO = 0x00000200 # for Zero Length fields PERF_SIZE_VARIABLE_LEN = 0x00000300 # length is in the CounterLength field of the Counter Definition structure # select one of the following values to indicate the counter field usage PERF_TYPE_NUMBER = 0x00000000 # a number (not a counter) PERF_TYPE_COUNTER = 0x00000400 # an increasing numeric value PERF_TYPE_TEXT = 0x00000800 # a text field PERF_TYPE_ZERO = 0x00000C00 # displays a zero # If the PERF_TYPE_NUMBER field was selected, then select one of the # following to describe the Number PERF_NUMBER_HEX = 0x00000000 # display as HEX value PERF_NUMBER_DECIMAL = 0x00010000 # display as a decimal integer PERF_NUMBER_DEC_1000 = 0x00020000 # display as a decimal/1000 # If the PERF_TYPE_COUNTER value was selected then select one of the # following to indicate the type of counter PERF_COUNTER_VALUE = 0x00000000 # display counter value PERF_COUNTER_RATE = 0x00010000 # divide ctr / delta time PERF_COUNTER_FRACTION = 0x00020000 # divide ctr / base PERF_COUNTER_BASE = 0x00030000 # base value used in fractions PERF_COUNTER_ELAPSED = 0x00040000 # subtract counter from current time PERF_COUNTER_QUEUE_LEN = 0x00050000 # Use Queue len processing func. PERF_COUNTER_HISTOGRAM = 0x00060000 # Counter begins or ends a histogram # If the PERF_TYPE_TEXT value was selected, then select one of the # following to indicate the type of TEXT data. PERF_TEXT_UNICODE = 0x00000000 # type of text in text field PERF_TEXT_ASCII = 0x00010000 # ASCII using the CodePage field # Timer SubTypes PERF_TIMER_TICK = 0x00000000 # use system perf. 
freq for base PERF_TIMER_100NS = 0x00100000 # use 100 NS timer time base units PERF_OBJECT_TIMER = 0x00200000 # use the object timer freq # Any types that have calculations performed can use one or more of the # following calculation modification flags listed here PERF_DELTA_COUNTER = 0x00400000 # compute difference first PERF_DELTA_BASE = 0x00800000 # compute base diff as well PERF_INVERSE_COUNTER = 0x01000000 # show as 1.00-value (assumes: PERF_MULTI_COUNTER = 0x02000000 # sum of multiple instances # Select one of the following values to indicate the display suffix (if any) PERF_DISPLAY_NO_SUFFIX = 0x00000000 # no suffix PERF_DISPLAY_PER_SEC = 0x10000000 # "/sec" PERF_DISPLAY_PERCENT = 0x20000000 # "%" PERF_DISPLAY_SECONDS = 0x30000000 # "secs" PERF_DISPLAY_NO_SHOW = 0x40000000 # value is not displayed def build_counter(obj, instance, instance_index, counter): r''' Makes a fully resolved counter path. Counter names are formatted like this: ``\Processor(*)\% Processor Time`` The above breaks down like this: obj = 'Processor' instance = '*' counter = '% Processor Time' Args: obj (str): The top level object instance (str): The instance of the object instance_index (int): The index of the instance. 
Can usually be 0 counter (str): The name of the counter Returns: Counter: A Counter object with the path if valid Raises: CommandExecutionError: If the path is invalid ''' path = win32pdh.MakeCounterPath( (None, obj, instance, None, instance_index, counter), 0) if win32pdh.ValidatePath(path) is 0: return Counter(path, obj, instance, instance_index, counter) raise CommandExecutionError('Invalid counter specified: {0}'.format(path)) build_counter = staticmethod(build_counter) def __init__(self, path, obj, instance, index, counter): self.path = path self.obj = obj self.instance = instance self.index = index self.counter = counter self.handle = None self.info = None self.type = None def add_to_query(self, query): ''' Add the current path to the query Args: query (obj): The handle to the query to add the counter ''' self.handle = win32pdh.AddCounter(query, self.path) def get_info(self): ''' Get information about the counter .. note:: GetCounterInfo sometimes crashes in the wrapper code. Fewer crashes if this is called after sampling data. ''' if not self.info: ci = win32pdh.GetCounterInfo(self.handle, 0) self.info = { 'type': ci[0], 'version': ci[1], 'scale': ci[2], 'default_scale': ci[3], 'user_data': ci[4], 'query_user_data': ci[5], 'full_path': ci[6], 'machine_name': ci[7][0], 'object_name': ci[7][1], 'instance_name': ci[7][2], 'parent_instance': ci[7][3], 'instance_index': ci[7][4], 'counter_name': ci[7][5], 'explain_text': ci[8] } return self.info def type_string(self): ''' Returns the names of the flags that are set in the Type field It can be used to format the counter. ''' type = self.get_info()['type'] type_list = [] for member in dir(self): if member.startswith("PERF_"): bit = getattr(self, member) if bit and bit & type: type_list.append(member[5:]) return type_list def __str__(self): return self.path
saltstack/salt
salt/utils/win_pdh.py
Counter.type_string
python
def type_string(self): ''' Returns the names of the flags that are set in the Type field It can be used to format the counter. ''' type = self.get_info()['type'] type_list = [] for member in dir(self): if member.startswith("PERF_"): bit = getattr(self, member) if bit and bit & type: type_list.append(member[5:]) return type_list
Returns the names of the flags that are set in the Type field It can be used to format the counter.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/win_pdh.py#L233-L246
[ "def get_info(self):\n '''\n Get information about the counter\n\n .. note::\n GetCounterInfo sometimes crashes in the wrapper code. Fewer crashes\n if this is called after sampling data.\n '''\n if not self.info:\n ci = win32pdh.GetCounterInfo(self.handle, 0)\n self.info = {\n 'type': ci[0],\n 'version': ci[1],\n 'scale': ci[2],\n 'default_scale': ci[3],\n 'user_data': ci[4],\n 'query_user_data': ci[5],\n 'full_path': ci[6],\n 'machine_name': ci[7][0],\n 'object_name': ci[7][1],\n 'instance_name': ci[7][2],\n 'parent_instance': ci[7][3],\n 'instance_index': ci[7][4],\n 'counter_name': ci[7][5],\n 'explain_text': ci[8]\n }\n return self.info\n" ]
class Counter(object): ''' Counter object Has enumerations and functions for working with counters ''' # The dwType field from GetCounterInfo returns the following, or'ed. # These come from WinPerf.h PERF_SIZE_DWORD = 0x00000000 PERF_SIZE_LARGE = 0x00000100 PERF_SIZE_ZERO = 0x00000200 # for Zero Length fields PERF_SIZE_VARIABLE_LEN = 0x00000300 # length is in the CounterLength field of the Counter Definition structure # select one of the following values to indicate the counter field usage PERF_TYPE_NUMBER = 0x00000000 # a number (not a counter) PERF_TYPE_COUNTER = 0x00000400 # an increasing numeric value PERF_TYPE_TEXT = 0x00000800 # a text field PERF_TYPE_ZERO = 0x00000C00 # displays a zero # If the PERF_TYPE_NUMBER field was selected, then select one of the # following to describe the Number PERF_NUMBER_HEX = 0x00000000 # display as HEX value PERF_NUMBER_DECIMAL = 0x00010000 # display as a decimal integer PERF_NUMBER_DEC_1000 = 0x00020000 # display as a decimal/1000 # If the PERF_TYPE_COUNTER value was selected then select one of the # following to indicate the type of counter PERF_COUNTER_VALUE = 0x00000000 # display counter value PERF_COUNTER_RATE = 0x00010000 # divide ctr / delta time PERF_COUNTER_FRACTION = 0x00020000 # divide ctr / base PERF_COUNTER_BASE = 0x00030000 # base value used in fractions PERF_COUNTER_ELAPSED = 0x00040000 # subtract counter from current time PERF_COUNTER_QUEUE_LEN = 0x00050000 # Use Queue len processing func. PERF_COUNTER_HISTOGRAM = 0x00060000 # Counter begins or ends a histogram # If the PERF_TYPE_TEXT value was selected, then select one of the # following to indicate the type of TEXT data. PERF_TEXT_UNICODE = 0x00000000 # type of text in text field PERF_TEXT_ASCII = 0x00010000 # ASCII using the CodePage field # Timer SubTypes PERF_TIMER_TICK = 0x00000000 # use system perf. 
freq for base PERF_TIMER_100NS = 0x00100000 # use 100 NS timer time base units PERF_OBJECT_TIMER = 0x00200000 # use the object timer freq # Any types that have calculations performed can use one or more of the # following calculation modification flags listed here PERF_DELTA_COUNTER = 0x00400000 # compute difference first PERF_DELTA_BASE = 0x00800000 # compute base diff as well PERF_INVERSE_COUNTER = 0x01000000 # show as 1.00-value (assumes: PERF_MULTI_COUNTER = 0x02000000 # sum of multiple instances # Select one of the following values to indicate the display suffix (if any) PERF_DISPLAY_NO_SUFFIX = 0x00000000 # no suffix PERF_DISPLAY_PER_SEC = 0x10000000 # "/sec" PERF_DISPLAY_PERCENT = 0x20000000 # "%" PERF_DISPLAY_SECONDS = 0x30000000 # "secs" PERF_DISPLAY_NO_SHOW = 0x40000000 # value is not displayed def build_counter(obj, instance, instance_index, counter): r''' Makes a fully resolved counter path. Counter names are formatted like this: ``\Processor(*)\% Processor Time`` The above breaks down like this: obj = 'Processor' instance = '*' counter = '% Processor Time' Args: obj (str): The top level object instance (str): The instance of the object instance_index (int): The index of the instance. 
Can usually be 0 counter (str): The name of the counter Returns: Counter: A Counter object with the path if valid Raises: CommandExecutionError: If the path is invalid ''' path = win32pdh.MakeCounterPath( (None, obj, instance, None, instance_index, counter), 0) if win32pdh.ValidatePath(path) is 0: return Counter(path, obj, instance, instance_index, counter) raise CommandExecutionError('Invalid counter specified: {0}'.format(path)) build_counter = staticmethod(build_counter) def __init__(self, path, obj, instance, index, counter): self.path = path self.obj = obj self.instance = instance self.index = index self.counter = counter self.handle = None self.info = None self.type = None def add_to_query(self, query): ''' Add the current path to the query Args: query (obj): The handle to the query to add the counter ''' self.handle = win32pdh.AddCounter(query, self.path) def get_info(self): ''' Get information about the counter .. note:: GetCounterInfo sometimes crashes in the wrapper code. Fewer crashes if this is called after sampling data. ''' if not self.info: ci = win32pdh.GetCounterInfo(self.handle, 0) self.info = { 'type': ci[0], 'version': ci[1], 'scale': ci[2], 'default_scale': ci[3], 'user_data': ci[4], 'query_user_data': ci[5], 'full_path': ci[6], 'machine_name': ci[7][0], 'object_name': ci[7][1], 'instance_name': ci[7][2], 'parent_instance': ci[7][3], 'instance_index': ci[7][4], 'counter_name': ci[7][5], 'explain_text': ci[8] } return self.info def value(self): ''' Return the counter value Returns: long: The counter value ''' (counter_type, value) = win32pdh.GetFormattedCounterValue( self.handle, win32pdh.PDH_FMT_DOUBLE) self.type = counter_type return value def __str__(self): return self.path
saltstack/salt
salt/states/win_path.py
exists
python
def exists(name, index=None): ''' Add the directory to the system PATH at index location index Position where the directory should be placed in the PATH. This is 0-indexed, so 0 means to prepend at the very start of the PATH. .. note:: If the index is not specified, and the directory needs to be added to the PATH, then the directory will be appended to the PATH, and this state will not enforce its location within the PATH. Examples: .. code-block:: yaml 'C:\\python27': win_path.exists 'C:\\sysinternals': win_path.exists: - index: 0 'C:\\mystuff': win_path.exists: - index: -1 ''' try: name = os.path.normpath(salt.utils.stringutils.to_unicode(name)) except TypeError: name = six.text_type(name) ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} if index is not None and not isinstance(index, six.integer_types): ret['comment'] = 'Index must be an integer' ret['result'] = False return ret def _get_path_lowercase(): return [x.lower() for x in __salt__['win_path.get_path']()] def _index(path=None): if path is None: path = _get_path_lowercase() try: pos = path.index(name.lower()) except ValueError: return None else: if index is not None and index < 0: # Since a negative index was used, convert the index to a # negative index to make the changes dict easier to read, as # well as making comparisons manageable. 
return -(len(path) - pos) else: return pos def _changes(old, new): return {'index': {'old': old, 'new': new}} pre_path = _get_path_lowercase() num_dirs = len(pre_path) if index is not None: if index > num_dirs: ret.setdefault('warnings', []).append( 'There are only {0} directories in the PATH, using an index ' 'of {0} instead of {1}.'.format(num_dirs, index) ) index = num_dirs elif index <= -num_dirs: ret.setdefault('warnings', []).append( 'There are only {0} directories in the PATH, using an index ' 'of 0 instead of {1}.'.format(num_dirs, index) ) index = 0 old_index = _index(pre_path) comments = [] if old_index is not None: # Directory exists in PATH if index is None: # We're not enforcing the index, and the directory is in the PATH. # There's nothing to do here. comments.append('{0} already exists in the PATH.'.format(name)) return _format_comments(ret, comments) else: if index == old_index: comments.append( '{0} already exists in the PATH at index {1}.'.format( name, index ) ) return _format_comments(ret, comments) else: if __opts__['test']: ret['result'] = None comments.append( '{0} would be moved from index {1} to {2}.'.format( name, old_index, index ) ) ret['changes'] = _changes(old_index, index) return _format_comments(ret, comments) else: # Directory does not exist in PATH if __opts__['test']: ret['result'] = None comments.append( '{0} would be added to the PATH{1}.'.format( name, ' at index {0}'.format(index) if index is not None else '' ) ) ret['changes'] = _changes(old_index, index) return _format_comments(ret, comments) try: ret['result'] = __salt__['win_path.add'](name, index=index, rehash=False) except Exception as exc: comments.append('Encountered error: {0}.'.format(exc)) ret['result'] = False if ret['result']: ret['result'] = __salt__['win_path.rehash']() if not ret['result']: comments.append( 'Updated registry with new PATH, but failed to rehash.' 
) new_index = _index() if ret['result']: # If we have not already determined a False result based on the return # from either win_path.add or win_path.rehash, check the new_index. ret['result'] = new_index is not None \ if index is None \ else index == new_index if index is not None and old_index is not None: comments.append( '{0} {1} from index {2} to {3}.'.format( 'Moved' if ret['result'] else 'Failed to move', name, old_index, index ) ) else: comments.append( '{0} {1} to the PATH{2}.'.format( 'Added' if ret['result'] else 'Failed to add', name, ' at index {0}'.format(index) if index is not None else '' ) ) if old_index != new_index: ret['changes'] = _changes(old_index, new_index) return _format_comments(ret, comments)
Add the directory to the system PATH at index location index Position where the directory should be placed in the PATH. This is 0-indexed, so 0 means to prepend at the very start of the PATH. .. note:: If the index is not specified, and the directory needs to be added to the PATH, then the directory will be appended to the PATH, and this state will not enforce its location within the PATH. Examples: .. code-block:: yaml 'C:\\python27': win_path.exists 'C:\\sysinternals': win_path.exists: - index: 0 'C:\\mystuff': win_path.exists: - index: -1
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/win_path.py#L66-L234
[ "def to_unicode(s, encoding=None, errors='strict', normalize=False):\n '''\n Given str or unicode, return unicode (str for python 3)\n '''\n def _normalize(s):\n return unicodedata.normalize('NFC', s) if normalize else s\n\n if encoding is None:\n # Try utf-8 first, and fall back to detected encoding\n encoding = ('utf-8', __salt_system_encoding__)\n if not isinstance(encoding, (tuple, list)):\n encoding = (encoding,)\n\n if not encoding:\n raise ValueError('encoding cannot be empty')\n\n exc = None\n if six.PY3:\n if isinstance(s, str):\n return _normalize(s)\n elif isinstance(s, (bytes, bytearray)):\n return _normalize(to_str(s, encoding, errors))\n raise TypeError('expected str, bytes, or bytearray')\n else:\n # This needs to be str and not six.string_types, since if the string is\n # already a unicode type, it does not need to be decoded (and doing so\n # will raise an exception).\n if isinstance(s, unicode): # pylint: disable=incompatible-py3-code,undefined-variable\n return _normalize(s)\n elif isinstance(s, (str, bytearray)):\n for enc in encoding:\n try:\n return _normalize(s.decode(enc, errors))\n except UnicodeDecodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str or bytearray')\n", "def _format_comments(ret, comments):\n ret['comment'] = ' '.join(comments)\n return ret\n", "def _get_path_lowercase():\n return [x.lower() for x in __salt__['win_path.get_path']()]\n", "def _index(path=None):\n if path is None:\n path = _get_path_lowercase()\n try:\n pos = path.index(name.lower())\n except ValueError:\n return None\n else:\n if index is not None and index < 0:\n # Since a negative index was used, convert the index to a\n # negative index to make the changes dict easier to read, as\n # well as making comparisons manageable.\n return 
-(len(path) - pos)\n else:\n return pos\n", "def _changes(old, new):\n return {'index': {'old': old, 'new': new}}\n" ]
# -*- coding: utf-8 -*- ''' Manage the Windows System PATH ''' from __future__ import absolute_import, print_function, unicode_literals # Import Python libs import os # Import Salt libs from salt.ext import six import salt.utils.stringutils def __virtual__(): ''' Load this state if the win_path module exists ''' return 'win_path' if 'win_path.rehash' in __salt__ else False def _format_comments(ret, comments): ret['comment'] = ' '.join(comments) return ret def absent(name): ''' Remove the directory from the SYSTEM path index: where the directory should be placed in the PATH (default: 0) Example: .. code-block:: yaml 'C:\\sysinternals': win_path.absent ''' ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} if not __salt__['win_path.exists'](name): ret['comment'] = '{0} is not in the PATH'.format(name) return ret if __opts__['test']: ret['comment'] = '{0} would be removed from the PATH'.format(name) ret['result'] = None return ret __salt__['win_path.remove'](name) if __salt__['win_path.exists'](name): ret['comment'] = 'Failed to remove {0} from the PATH'.format(name) ret['result'] = False else: ret['comment'] = 'Removed {0} from the PATH'.format(name) ret['changes']['removed'] = name return ret
saltstack/salt
salt/proxy/rest_sample.py
id
python
def id(opts): ''' Return a unique ID for this proxy minion. This ID MUST NOT CHANGE. If it changes while the proxy is running the salt-master will get really confused and may stop talking to this minion ''' r = salt.utils.http.query(opts['proxy']['url']+'id', decode_type='json', decode=True) return r['dict']['id'].encode('ascii', 'ignore')
Return a unique ID for this proxy minion. This ID MUST NOT CHANGE. If it changes while the proxy is running the salt-master will get really confused and may stop talking to this minion
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/rest_sample.py#L69-L76
null
# -*- coding: utf-8 -*- ''' This is a simple proxy-minion designed to connect to and communicate with the bottle-based web service contained in https://github.com/saltstack/salt-contrib/tree/master/proxyminion_rest_example ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import logging import salt.utils.http HAS_REST_EXAMPLE = True # This must be present or the Salt loader won't load this module __proxyenabled__ = ['rest_sample'] # Variables are scoped to this module so we can have persistent data # across calls to fns in here. GRAINS_CACHE = {} DETAILS = {} # Want logging! log = logging.getLogger(__file__) # This does nothing, it's here just as an example and to provide a log # entry when the module is loaded. def __virtual__(): ''' Only return if all the modules are available ''' log.debug('rest_sample proxy __virtual__() called...') return True # Every proxy module needs an 'init', though you can # just put DETAILS['initialized'] = True here if nothing # else needs to be done. 
def init(opts): log.debug('rest_sample proxy init() called...') DETAILS['initialized'] = True # Save the REST URL DETAILS['url'] = opts['proxy']['url'] # Make sure the REST URL ends with a '/' if not DETAILS['url'].endswith('/'): DETAILS['url'] += '/' def initialized(): ''' Since grains are loaded in many different places and some of those places occur before the proxy can be initialized, return whether our init() function has been called ''' return DETAILS.get('initialized', False) def alive(opts): log.debug('=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-') log.debug('proxys alive() fn called') log.debug('=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-') return ping() def grains(): ''' Get the grains from the proxied device ''' if not DETAILS.get('grains_cache', {}): r = salt.utils.http.query(DETAILS['url']+'info', decode_type='json', decode=True) DETAILS['grains_cache'] = r['dict'] return DETAILS['grains_cache'] def grains_refresh(): ''' Refresh the grains from the proxied device ''' DETAILS['grains_cache'] = None return grains() def fns(): return {'details': 'This key is here because a function in ' 'grains/rest_sample.py called fns() here in the proxymodule.'} def service_start(name): ''' Start a "service" on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'service/start/'+name, decode_type='json', decode=True) return r['dict'] def service_stop(name): ''' Stop a "service" on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'service/stop/'+name, decode_type='json', decode=True) return r['dict'] def service_restart(name): ''' Restart a "service" on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'service/restart/'+name, decode_type='json', decode=True) return r['dict'] def service_list(): ''' List "services" on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'service/list', decode_type='json', decode=True) return r['dict'] def service_status(name): ''' Check if a service is running on the REST 
server ''' r = salt.utils.http.query(DETAILS['url']+'service/status/'+name, decode_type='json', decode=True) return r['dict'] def package_list(): ''' List "packages" installed on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'package/list', decode_type='json', decode=True) return r['dict'] def package_install(name, **kwargs): ''' Install a "package" on the REST server ''' cmd = DETAILS['url']+'package/install/'+name if kwargs.get('version', False): cmd += '/'+kwargs['version'] else: cmd += '/1.0' r = salt.utils.http.query(cmd, decode_type='json', decode=True) return r['dict'] def fix_outage(): r = salt.utils.http.query(DETAILS['url']+'fix_outage') return r def uptodate(name): ''' Call the REST endpoint to see if the packages on the "server" are up to date. ''' r = salt.utils.http.query(DETAILS['url']+'package/remove/'+name, decode_type='json', decode=True) return r['dict'] def package_remove(name): ''' Remove a "package" on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'package/remove/'+name, decode_type='json', decode=True) return r['dict'] def package_status(name): ''' Check the installation status of a package on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'package/status/'+name, decode_type='json', decode=True) return r['dict'] def ping(): ''' Is the REST server up? ''' r = salt.utils.http.query(DETAILS['url']+'ping', decode_type='json', decode=True) try: return r['dict'].get('ret', False) except Exception: return False def shutdown(opts): ''' For this proxy shutdown is a no-op ''' log.debug('rest_sample proxy shutdown() called...') def test_from_state(): ''' Test function so we have something to call from a state :return: ''' log.debug('test_from_state called') return 'testvalue'
saltstack/salt
salt/proxy/rest_sample.py
grains
python
def grains(): ''' Get the grains from the proxied device ''' if not DETAILS.get('grains_cache', {}): r = salt.utils.http.query(DETAILS['url']+'info', decode_type='json', decode=True) DETAILS['grains_cache'] = r['dict'] return DETAILS['grains_cache']
Get the grains from the proxied device
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/rest_sample.py#L79-L86
null
# -*- coding: utf-8 -*- ''' This is a simple proxy-minion designed to connect to and communicate with the bottle-based web service contained in https://github.com/saltstack/salt-contrib/tree/master/proxyminion_rest_example ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import logging import salt.utils.http HAS_REST_EXAMPLE = True # This must be present or the Salt loader won't load this module __proxyenabled__ = ['rest_sample'] # Variables are scoped to this module so we can have persistent data # across calls to fns in here. GRAINS_CACHE = {} DETAILS = {} # Want logging! log = logging.getLogger(__file__) # This does nothing, it's here just as an example and to provide a log # entry when the module is loaded. def __virtual__(): ''' Only return if all the modules are available ''' log.debug('rest_sample proxy __virtual__() called...') return True # Every proxy module needs an 'init', though you can # just put DETAILS['initialized'] = True here if nothing # else needs to be done. def init(opts): log.debug('rest_sample proxy init() called...') DETAILS['initialized'] = True # Save the REST URL DETAILS['url'] = opts['proxy']['url'] # Make sure the REST URL ends with a '/' if not DETAILS['url'].endswith('/'): DETAILS['url'] += '/' def initialized(): ''' Since grains are loaded in many different places and some of those places occur before the proxy can be initialized, return whether our init() function has been called ''' return DETAILS.get('initialized', False) def alive(opts): log.debug('=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-') log.debug('proxys alive() fn called') log.debug('=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-') return ping() def id(opts): ''' Return a unique ID for this proxy minion. This ID MUST NOT CHANGE. 
If it changes while the proxy is running the salt-master will get really confused and may stop talking to this minion ''' r = salt.utils.http.query(opts['proxy']['url']+'id', decode_type='json', decode=True) return r['dict']['id'].encode('ascii', 'ignore') def grains_refresh(): ''' Refresh the grains from the proxied device ''' DETAILS['grains_cache'] = None return grains() def fns(): return {'details': 'This key is here because a function in ' 'grains/rest_sample.py called fns() here in the proxymodule.'} def service_start(name): ''' Start a "service" on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'service/start/'+name, decode_type='json', decode=True) return r['dict'] def service_stop(name): ''' Stop a "service" on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'service/stop/'+name, decode_type='json', decode=True) return r['dict'] def service_restart(name): ''' Restart a "service" on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'service/restart/'+name, decode_type='json', decode=True) return r['dict'] def service_list(): ''' List "services" on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'service/list', decode_type='json', decode=True) return r['dict'] def service_status(name): ''' Check if a service is running on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'service/status/'+name, decode_type='json', decode=True) return r['dict'] def package_list(): ''' List "packages" installed on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'package/list', decode_type='json', decode=True) return r['dict'] def package_install(name, **kwargs): ''' Install a "package" on the REST server ''' cmd = DETAILS['url']+'package/install/'+name if kwargs.get('version', False): cmd += '/'+kwargs['version'] else: cmd += '/1.0' r = salt.utils.http.query(cmd, decode_type='json', decode=True) return r['dict'] def fix_outage(): r = salt.utils.http.query(DETAILS['url']+'fix_outage') return r def 
uptodate(name): ''' Call the REST endpoint to see if the packages on the "server" are up to date. ''' r = salt.utils.http.query(DETAILS['url']+'package/remove/'+name, decode_type='json', decode=True) return r['dict'] def package_remove(name): ''' Remove a "package" on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'package/remove/'+name, decode_type='json', decode=True) return r['dict'] def package_status(name): ''' Check the installation status of a package on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'package/status/'+name, decode_type='json', decode=True) return r['dict'] def ping(): ''' Is the REST server up? ''' r = salt.utils.http.query(DETAILS['url']+'ping', decode_type='json', decode=True) try: return r['dict'].get('ret', False) except Exception: return False def shutdown(opts): ''' For this proxy shutdown is a no-op ''' log.debug('rest_sample proxy shutdown() called...') def test_from_state(): ''' Test function so we have something to call from a state :return: ''' log.debug('test_from_state called') return 'testvalue'
saltstack/salt
salt/proxy/rest_sample.py
service_start
python
def service_start(name): ''' Start a "service" on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'service/start/'+name, decode_type='json', decode=True) return r['dict']
Start a "service" on the REST server
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/rest_sample.py#L102-L107
null
# -*- coding: utf-8 -*- ''' This is a simple proxy-minion designed to connect to and communicate with the bottle-based web service contained in https://github.com/saltstack/salt-contrib/tree/master/proxyminion_rest_example ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import logging import salt.utils.http HAS_REST_EXAMPLE = True # This must be present or the Salt loader won't load this module __proxyenabled__ = ['rest_sample'] # Variables are scoped to this module so we can have persistent data # across calls to fns in here. GRAINS_CACHE = {} DETAILS = {} # Want logging! log = logging.getLogger(__file__) # This does nothing, it's here just as an example and to provide a log # entry when the module is loaded. def __virtual__(): ''' Only return if all the modules are available ''' log.debug('rest_sample proxy __virtual__() called...') return True # Every proxy module needs an 'init', though you can # just put DETAILS['initialized'] = True here if nothing # else needs to be done. def init(opts): log.debug('rest_sample proxy init() called...') DETAILS['initialized'] = True # Save the REST URL DETAILS['url'] = opts['proxy']['url'] # Make sure the REST URL ends with a '/' if not DETAILS['url'].endswith('/'): DETAILS['url'] += '/' def initialized(): ''' Since grains are loaded in many different places and some of those places occur before the proxy can be initialized, return whether our init() function has been called ''' return DETAILS.get('initialized', False) def alive(opts): log.debug('=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-') log.debug('proxys alive() fn called') log.debug('=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-') return ping() def id(opts): ''' Return a unique ID for this proxy minion. This ID MUST NOT CHANGE. 
If it changes while the proxy is running the salt-master will get really confused and may stop talking to this minion ''' r = salt.utils.http.query(opts['proxy']['url']+'id', decode_type='json', decode=True) return r['dict']['id'].encode('ascii', 'ignore') def grains(): ''' Get the grains from the proxied device ''' if not DETAILS.get('grains_cache', {}): r = salt.utils.http.query(DETAILS['url']+'info', decode_type='json', decode=True) DETAILS['grains_cache'] = r['dict'] return DETAILS['grains_cache'] def grains_refresh(): ''' Refresh the grains from the proxied device ''' DETAILS['grains_cache'] = None return grains() def fns(): return {'details': 'This key is here because a function in ' 'grains/rest_sample.py called fns() here in the proxymodule.'} def service_stop(name): ''' Stop a "service" on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'service/stop/'+name, decode_type='json', decode=True) return r['dict'] def service_restart(name): ''' Restart a "service" on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'service/restart/'+name, decode_type='json', decode=True) return r['dict'] def service_list(): ''' List "services" on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'service/list', decode_type='json', decode=True) return r['dict'] def service_status(name): ''' Check if a service is running on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'service/status/'+name, decode_type='json', decode=True) return r['dict'] def package_list(): ''' List "packages" installed on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'package/list', decode_type='json', decode=True) return r['dict'] def package_install(name, **kwargs): ''' Install a "package" on the REST server ''' cmd = DETAILS['url']+'package/install/'+name if kwargs.get('version', False): cmd += '/'+kwargs['version'] else: cmd += '/1.0' r = salt.utils.http.query(cmd, decode_type='json', decode=True) return r['dict'] def fix_outage(): r = 
salt.utils.http.query(DETAILS['url']+'fix_outage') return r def uptodate(name): ''' Call the REST endpoint to see if the packages on the "server" are up to date. ''' r = salt.utils.http.query(DETAILS['url']+'package/remove/'+name, decode_type='json', decode=True) return r['dict'] def package_remove(name): ''' Remove a "package" on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'package/remove/'+name, decode_type='json', decode=True) return r['dict'] def package_status(name): ''' Check the installation status of a package on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'package/status/'+name, decode_type='json', decode=True) return r['dict'] def ping(): ''' Is the REST server up? ''' r = salt.utils.http.query(DETAILS['url']+'ping', decode_type='json', decode=True) try: return r['dict'].get('ret', False) except Exception: return False def shutdown(opts): ''' For this proxy shutdown is a no-op ''' log.debug('rest_sample proxy shutdown() called...') def test_from_state(): ''' Test function so we have something to call from a state :return: ''' log.debug('test_from_state called') return 'testvalue'
saltstack/salt
salt/proxy/rest_sample.py
service_list
python
def service_list(): ''' List "services" on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'service/list', decode_type='json', decode=True) return r['dict']
List "services" on the REST server
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/rest_sample.py#L126-L131
null
# -*- coding: utf-8 -*- ''' This is a simple proxy-minion designed to connect to and communicate with the bottle-based web service contained in https://github.com/saltstack/salt-contrib/tree/master/proxyminion_rest_example ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import logging import salt.utils.http HAS_REST_EXAMPLE = True # This must be present or the Salt loader won't load this module __proxyenabled__ = ['rest_sample'] # Variables are scoped to this module so we can have persistent data # across calls to fns in here. GRAINS_CACHE = {} DETAILS = {} # Want logging! log = logging.getLogger(__file__) # This does nothing, it's here just as an example and to provide a log # entry when the module is loaded. def __virtual__(): ''' Only return if all the modules are available ''' log.debug('rest_sample proxy __virtual__() called...') return True # Every proxy module needs an 'init', though you can # just put DETAILS['initialized'] = True here if nothing # else needs to be done. def init(opts): log.debug('rest_sample proxy init() called...') DETAILS['initialized'] = True # Save the REST URL DETAILS['url'] = opts['proxy']['url'] # Make sure the REST URL ends with a '/' if not DETAILS['url'].endswith('/'): DETAILS['url'] += '/' def initialized(): ''' Since grains are loaded in many different places and some of those places occur before the proxy can be initialized, return whether our init() function has been called ''' return DETAILS.get('initialized', False) def alive(opts): log.debug('=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-') log.debug('proxys alive() fn called') log.debug('=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-') return ping() def id(opts): ''' Return a unique ID for this proxy minion. This ID MUST NOT CHANGE. 
If it changes while the proxy is running the salt-master will get really confused and may stop talking to this minion ''' r = salt.utils.http.query(opts['proxy']['url']+'id', decode_type='json', decode=True) return r['dict']['id'].encode('ascii', 'ignore') def grains(): ''' Get the grains from the proxied device ''' if not DETAILS.get('grains_cache', {}): r = salt.utils.http.query(DETAILS['url']+'info', decode_type='json', decode=True) DETAILS['grains_cache'] = r['dict'] return DETAILS['grains_cache'] def grains_refresh(): ''' Refresh the grains from the proxied device ''' DETAILS['grains_cache'] = None return grains() def fns(): return {'details': 'This key is here because a function in ' 'grains/rest_sample.py called fns() here in the proxymodule.'} def service_start(name): ''' Start a "service" on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'service/start/'+name, decode_type='json', decode=True) return r['dict'] def service_stop(name): ''' Stop a "service" on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'service/stop/'+name, decode_type='json', decode=True) return r['dict'] def service_restart(name): ''' Restart a "service" on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'service/restart/'+name, decode_type='json', decode=True) return r['dict'] def service_status(name): ''' Check if a service is running on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'service/status/'+name, decode_type='json', decode=True) return r['dict'] def package_list(): ''' List "packages" installed on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'package/list', decode_type='json', decode=True) return r['dict'] def package_install(name, **kwargs): ''' Install a "package" on the REST server ''' cmd = DETAILS['url']+'package/install/'+name if kwargs.get('version', False): cmd += '/'+kwargs['version'] else: cmd += '/1.0' r = salt.utils.http.query(cmd, decode_type='json', decode=True) return r['dict'] def 
fix_outage(): r = salt.utils.http.query(DETAILS['url']+'fix_outage') return r def uptodate(name): ''' Call the REST endpoint to see if the packages on the "server" are up to date. ''' r = salt.utils.http.query(DETAILS['url']+'package/remove/'+name, decode_type='json', decode=True) return r['dict'] def package_remove(name): ''' Remove a "package" on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'package/remove/'+name, decode_type='json', decode=True) return r['dict'] def package_status(name): ''' Check the installation status of a package on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'package/status/'+name, decode_type='json', decode=True) return r['dict'] def ping(): ''' Is the REST server up? ''' r = salt.utils.http.query(DETAILS['url']+'ping', decode_type='json', decode=True) try: return r['dict'].get('ret', False) except Exception: return False def shutdown(opts): ''' For this proxy shutdown is a no-op ''' log.debug('rest_sample proxy shutdown() called...') def test_from_state(): ''' Test function so we have something to call from a state :return: ''' log.debug('test_from_state called') return 'testvalue'
saltstack/salt
salt/proxy/rest_sample.py
package_install
python
def package_install(name, **kwargs): ''' Install a "package" on the REST server ''' cmd = DETAILS['url']+'package/install/'+name if kwargs.get('version', False): cmd += '/'+kwargs['version'] else: cmd += '/1.0' r = salt.utils.http.query(cmd, decode_type='json', decode=True) return r['dict']
Install a "package" on the REST server
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/rest_sample.py#L150-L160
null
# -*- coding: utf-8 -*- ''' This is a simple proxy-minion designed to connect to and communicate with the bottle-based web service contained in https://github.com/saltstack/salt-contrib/tree/master/proxyminion_rest_example ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import logging import salt.utils.http HAS_REST_EXAMPLE = True # This must be present or the Salt loader won't load this module __proxyenabled__ = ['rest_sample'] # Variables are scoped to this module so we can have persistent data # across calls to fns in here. GRAINS_CACHE = {} DETAILS = {} # Want logging! log = logging.getLogger(__file__) # This does nothing, it's here just as an example and to provide a log # entry when the module is loaded. def __virtual__(): ''' Only return if all the modules are available ''' log.debug('rest_sample proxy __virtual__() called...') return True # Every proxy module needs an 'init', though you can # just put DETAILS['initialized'] = True here if nothing # else needs to be done. def init(opts): log.debug('rest_sample proxy init() called...') DETAILS['initialized'] = True # Save the REST URL DETAILS['url'] = opts['proxy']['url'] # Make sure the REST URL ends with a '/' if not DETAILS['url'].endswith('/'): DETAILS['url'] += '/' def initialized(): ''' Since grains are loaded in many different places and some of those places occur before the proxy can be initialized, return whether our init() function has been called ''' return DETAILS.get('initialized', False) def alive(opts): log.debug('=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-') log.debug('proxys alive() fn called') log.debug('=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-') return ping() def id(opts): ''' Return a unique ID for this proxy minion. This ID MUST NOT CHANGE. 
If it changes while the proxy is running the salt-master will get really confused and may stop talking to this minion ''' r = salt.utils.http.query(opts['proxy']['url']+'id', decode_type='json', decode=True) return r['dict']['id'].encode('ascii', 'ignore') def grains(): ''' Get the grains from the proxied device ''' if not DETAILS.get('grains_cache', {}): r = salt.utils.http.query(DETAILS['url']+'info', decode_type='json', decode=True) DETAILS['grains_cache'] = r['dict'] return DETAILS['grains_cache'] def grains_refresh(): ''' Refresh the grains from the proxied device ''' DETAILS['grains_cache'] = None return grains() def fns(): return {'details': 'This key is here because a function in ' 'grains/rest_sample.py called fns() here in the proxymodule.'} def service_start(name): ''' Start a "service" on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'service/start/'+name, decode_type='json', decode=True) return r['dict'] def service_stop(name): ''' Stop a "service" on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'service/stop/'+name, decode_type='json', decode=True) return r['dict'] def service_restart(name): ''' Restart a "service" on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'service/restart/'+name, decode_type='json', decode=True) return r['dict'] def service_list(): ''' List "services" on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'service/list', decode_type='json', decode=True) return r['dict'] def service_status(name): ''' Check if a service is running on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'service/status/'+name, decode_type='json', decode=True) return r['dict'] def package_list(): ''' List "packages" installed on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'package/list', decode_type='json', decode=True) return r['dict'] def fix_outage(): r = salt.utils.http.query(DETAILS['url']+'fix_outage') return r def uptodate(name): ''' Call the REST endpoint to 
see if the packages on the "server" are up to date. ''' r = salt.utils.http.query(DETAILS['url']+'package/remove/'+name, decode_type='json', decode=True) return r['dict'] def package_remove(name): ''' Remove a "package" on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'package/remove/'+name, decode_type='json', decode=True) return r['dict'] def package_status(name): ''' Check the installation status of a package on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'package/status/'+name, decode_type='json', decode=True) return r['dict'] def ping(): ''' Is the REST server up? ''' r = salt.utils.http.query(DETAILS['url']+'ping', decode_type='json', decode=True) try: return r['dict'].get('ret', False) except Exception: return False def shutdown(opts): ''' For this proxy shutdown is a no-op ''' log.debug('rest_sample proxy shutdown() called...') def test_from_state(): ''' Test function so we have something to call from a state :return: ''' log.debug('test_from_state called') return 'testvalue'
saltstack/salt
salt/proxy/rest_sample.py
ping
python
def ping(): ''' Is the REST server up? ''' r = salt.utils.http.query(DETAILS['url']+'ping', decode_type='json', decode=True) try: return r['dict'].get('ret', False) except Exception: return False
Is the REST server up?
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/rest_sample.py#L194-L202
null
# -*- coding: utf-8 -*- ''' This is a simple proxy-minion designed to connect to and communicate with the bottle-based web service contained in https://github.com/saltstack/salt-contrib/tree/master/proxyminion_rest_example ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import logging import salt.utils.http HAS_REST_EXAMPLE = True # This must be present or the Salt loader won't load this module __proxyenabled__ = ['rest_sample'] # Variables are scoped to this module so we can have persistent data # across calls to fns in here. GRAINS_CACHE = {} DETAILS = {} # Want logging! log = logging.getLogger(__file__) # This does nothing, it's here just as an example and to provide a log # entry when the module is loaded. def __virtual__(): ''' Only return if all the modules are available ''' log.debug('rest_sample proxy __virtual__() called...') return True # Every proxy module needs an 'init', though you can # just put DETAILS['initialized'] = True here if nothing # else needs to be done. def init(opts): log.debug('rest_sample proxy init() called...') DETAILS['initialized'] = True # Save the REST URL DETAILS['url'] = opts['proxy']['url'] # Make sure the REST URL ends with a '/' if not DETAILS['url'].endswith('/'): DETAILS['url'] += '/' def initialized(): ''' Since grains are loaded in many different places and some of those places occur before the proxy can be initialized, return whether our init() function has been called ''' return DETAILS.get('initialized', False) def alive(opts): log.debug('=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-') log.debug('proxys alive() fn called') log.debug('=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-') return ping() def id(opts): ''' Return a unique ID for this proxy minion. This ID MUST NOT CHANGE. 
If it changes while the proxy is running the salt-master will get really confused and may stop talking to this minion ''' r = salt.utils.http.query(opts['proxy']['url']+'id', decode_type='json', decode=True) return r['dict']['id'].encode('ascii', 'ignore') def grains(): ''' Get the grains from the proxied device ''' if not DETAILS.get('grains_cache', {}): r = salt.utils.http.query(DETAILS['url']+'info', decode_type='json', decode=True) DETAILS['grains_cache'] = r['dict'] return DETAILS['grains_cache'] def grains_refresh(): ''' Refresh the grains from the proxied device ''' DETAILS['grains_cache'] = None return grains() def fns(): return {'details': 'This key is here because a function in ' 'grains/rest_sample.py called fns() here in the proxymodule.'} def service_start(name): ''' Start a "service" on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'service/start/'+name, decode_type='json', decode=True) return r['dict'] def service_stop(name): ''' Stop a "service" on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'service/stop/'+name, decode_type='json', decode=True) return r['dict'] def service_restart(name): ''' Restart a "service" on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'service/restart/'+name, decode_type='json', decode=True) return r['dict'] def service_list(): ''' List "services" on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'service/list', decode_type='json', decode=True) return r['dict'] def service_status(name): ''' Check if a service is running on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'service/status/'+name, decode_type='json', decode=True) return r['dict'] def package_list(): ''' List "packages" installed on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'package/list', decode_type='json', decode=True) return r['dict'] def package_install(name, **kwargs): ''' Install a "package" on the REST server ''' cmd = DETAILS['url']+'package/install/'+name if 
kwargs.get('version', False): cmd += '/'+kwargs['version'] else: cmd += '/1.0' r = salt.utils.http.query(cmd, decode_type='json', decode=True) return r['dict'] def fix_outage(): r = salt.utils.http.query(DETAILS['url']+'fix_outage') return r def uptodate(name): ''' Call the REST endpoint to see if the packages on the "server" are up to date. ''' r = salt.utils.http.query(DETAILS['url']+'package/remove/'+name, decode_type='json', decode=True) return r['dict'] def package_remove(name): ''' Remove a "package" on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'package/remove/'+name, decode_type='json', decode=True) return r['dict'] def package_status(name): ''' Check the installation status of a package on the REST server ''' r = salt.utils.http.query(DETAILS['url']+'package/status/'+name, decode_type='json', decode=True) return r['dict'] def shutdown(opts): ''' For this proxy shutdown is a no-op ''' log.debug('rest_sample proxy shutdown() called...') def test_from_state(): ''' Test function so we have something to call from a state :return: ''' log.debug('test_from_state called') return 'testvalue'
saltstack/salt
salt/states/mac_xattr.py
exists
python
def exists(name, attributes): ''' Make sure the given attributes exist on the file/directory name The path to the file/directory attributes The attributes that should exist on the file/directory, this is accepted as an array, with key and value split with an equals sign, if you want to specify a hex value then add 0x to the beginning of the value. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not os.path.exists(name): ret['result'] = False ret['comment'] = "File or directory doesn't exist" return ret current_attrs = __salt__['xattr.list'](name) current_ids = current_attrs.keys() for attr in attributes: attr_id, attr_val = attr.split("=") attr_hex = attr_val.startswith("0x") if attr_hex: # Remove spaces and new lines so we can match these current_attrs[attr_id] = __salt__['xattr.read'](name, attr_id, hex=True).replace(" ", "").replace("\n", "") attr_val = attr_val[2:].replace(" ", "") if attr_id not in current_attrs: value_matches = False else: value_matches = ((current_attrs[attr_id] == attr_val) or (attr_hex and current_attrs[attr_id] == attr_val)) if attr_id in current_ids and value_matches: continue else: ret['changes'][attr_id] = attr_val __salt__['xattr.write'](name, attr_id, attr_val, attr_hex) if not ret['changes']: ret['comment'] = 'All values existed correctly.' return ret
Make sure the given attributes exist on the file/directory name The path to the file/directory attributes The attributes that should exist on the file/directory, this is accepted as an array, with key and value split with an equals sign, if you want to specify a hex value then add 0x to the beginning of the value.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/mac_xattr.py#L35-L85
null
# -*- coding: utf-8 -*- ''' Allows you to manage extended attributes on files or directories ================================================================ Install, enable and disable assistive access on macOS minions .. code-block:: yaml /path/to/file: xattr.exists: - attributes: - com.file.attr=test - com.apple.quarantine=0x00001111 ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import logging import os log = logging.getLogger(__name__) __virtualname__ = "xattr" def __virtual__(): ''' Only work on Mac OS ''' if __grains__['os'] in ['MacOS', 'Darwin']: return __virtualname__ return False def delete(name, attributes): ''' Make sure the given attributes are deleted from the file/directory name The path to the file/directory attributes The attributes that should be removed from the file/directory, this is accepted as an array. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not os.path.exists(name): ret['result'] = False ret['comment'] = "File or directory doesn't exist" return ret current_attrs = __salt__['xattr.list'](name) current_ids = current_attrs.keys() for attr in attributes: if attr in current_ids: __salt__['xattr.delete'](name, attr) ret['changes'][attr] = 'delete' if not ret['changes']: ret['comment'] = 'All attributes were already deleted.' return ret
saltstack/salt
salt/states/mac_xattr.py
delete
python
def delete(name, attributes): ''' Make sure the given attributes are deleted from the file/directory name The path to the file/directory attributes The attributes that should be removed from the file/directory, this is accepted as an array. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not os.path.exists(name): ret['result'] = False ret['comment'] = "File or directory doesn't exist" return ret current_attrs = __salt__['xattr.list'](name) current_ids = current_attrs.keys() for attr in attributes: if attr in current_ids: __salt__['xattr.delete'](name, attr) ret['changes'][attr] = 'delete' if not ret['changes']: ret['comment'] = 'All attributes were already deleted.' return ret
Make sure the given attributes are deleted from the file/directory name The path to the file/directory attributes The attributes that should be removed from the file/directory, this is accepted as an array.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/mac_xattr.py#L88-L121
null
# -*- coding: utf-8 -*- ''' Allows you to manage extended attributes on files or directories ================================================================ Install, enable and disable assistive access on macOS minions .. code-block:: yaml /path/to/file: xattr.exists: - attributes: - com.file.attr=test - com.apple.quarantine=0x00001111 ''' from __future__ import absolute_import, unicode_literals, print_function # Import python libs import logging import os log = logging.getLogger(__name__) __virtualname__ = "xattr" def __virtual__(): ''' Only work on Mac OS ''' if __grains__['os'] in ['MacOS', 'Darwin']: return __virtualname__ return False def exists(name, attributes): ''' Make sure the given attributes exist on the file/directory name The path to the file/directory attributes The attributes that should exist on the file/directory, this is accepted as an array, with key and value split with an equals sign, if you want to specify a hex value then add 0x to the beginning of the value. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} if not os.path.exists(name): ret['result'] = False ret['comment'] = "File or directory doesn't exist" return ret current_attrs = __salt__['xattr.list'](name) current_ids = current_attrs.keys() for attr in attributes: attr_id, attr_val = attr.split("=") attr_hex = attr_val.startswith("0x") if attr_hex: # Remove spaces and new lines so we can match these current_attrs[attr_id] = __salt__['xattr.read'](name, attr_id, hex=True).replace(" ", "").replace("\n", "") attr_val = attr_val[2:].replace(" ", "") if attr_id not in current_attrs: value_matches = False else: value_matches = ((current_attrs[attr_id] == attr_val) or (attr_hex and current_attrs[attr_id] == attr_val)) if attr_id in current_ids and value_matches: continue else: ret['changes'][attr_id] = attr_val __salt__['xattr.write'](name, attr_id, attr_val, attr_hex) if not ret['changes']: ret['comment'] = 'All values existed correctly.' return ret
saltstack/salt
salt/utils/validate/path.py
is_writeable
python
def is_writeable(path, check_parent=False): ''' Check if a given path is writeable by the current user. :param path: The path to check :param check_parent: If the path to check does not exist, check for the ability to write to the parent directory instead :returns: True or False ''' if os.access(path, os.F_OK) and os.access(path, os.W_OK): # The path exists and is writeable return True if os.access(path, os.F_OK) and not os.access(path, os.W_OK): # The path exists and is not writeable return False # The path does not exists or is not writeable if check_parent is False: # We're not allowed to check the parent directory of the provided path return False # Lets get the parent directory of the provided path parent_dir = os.path.dirname(path) if not os.access(parent_dir, os.F_OK): # Parent directory does not exit return False # Finally, return if we're allowed to write in the parent directory of the # provided path return os.access(parent_dir, os.W_OK)
Check if a given path is writeable by the current user. :param path: The path to check :param check_parent: If the path to check does not exist, check for the ability to write to the parent directory instead :returns: True or False
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/validate/path.py#L17-L50
null
# -*- coding: utf-8 -*- ''' :codeauthor: Pedro Algarvio (pedro@algarvio.me) salt.utils.validate.path ~~~~~~~~~~~~~~~~~~~~~~~~ Several path related validators ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import os def is_readable(path): ''' Check if a given path is readable by the current user. :param path: The path to check :returns: True or False ''' if os.access(path, os.F_OK) and os.access(path, os.R_OK): # The path exists and is readable return True # The path does not exist return False def is_executable(path): ''' Check if a given path is executable by the current user. :param path: The path to check :returns: True or False ''' return os.access(path, os.X_OK)
saltstack/salt
salt/utils/validate/path.py
is_readable
python
def is_readable(path): ''' Check if a given path is readable by the current user. :param path: The path to check :returns: True or False ''' if os.access(path, os.F_OK) and os.access(path, os.R_OK): # The path exists and is readable return True # The path does not exist return False
Check if a given path is readable by the current user. :param path: The path to check :returns: True or False
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/validate/path.py#L53-L66
null
# -*- coding: utf-8 -*- ''' :codeauthor: Pedro Algarvio (pedro@algarvio.me) salt.utils.validate.path ~~~~~~~~~~~~~~~~~~~~~~~~ Several path related validators ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import os def is_writeable(path, check_parent=False): ''' Check if a given path is writeable by the current user. :param path: The path to check :param check_parent: If the path to check does not exist, check for the ability to write to the parent directory instead :returns: True or False ''' if os.access(path, os.F_OK) and os.access(path, os.W_OK): # The path exists and is writeable return True if os.access(path, os.F_OK) and not os.access(path, os.W_OK): # The path exists and is not writeable return False # The path does not exists or is not writeable if check_parent is False: # We're not allowed to check the parent directory of the provided path return False # Lets get the parent directory of the provided path parent_dir = os.path.dirname(path) if not os.access(parent_dir, os.F_OK): # Parent directory does not exit return False # Finally, return if we're allowed to write in the parent directory of the # provided path return os.access(parent_dir, os.W_OK) def is_executable(path): ''' Check if a given path is executable by the current user. :param path: The path to check :returns: True or False ''' return os.access(path, os.X_OK)
saltstack/salt
salt/engines/junos_syslog.py
_SyslogServerFactory.parseData
python
def parseData(self, data, host, port, options): ''' This function will parse the raw syslog data, dynamically create the topic according to the topic specified by the user (if specified) and decide whether to send the syslog data as an event on the master bus, based on the constraints given by the user. :param data: The raw syslog event data which is to be parsed. :param host: The IP of the host from where syslog is forwarded. :param port: Port of the junos device from which the data is sent :param options: kwargs provided by the user in the configuration file. :return: The result dictionary which contains the data and the topic, if the event is to be sent on the bus. ''' data = self.obj.parse(data) data['hostip'] = host log.debug( 'Junos Syslog - received %s from %s, sent from port %s', data, host, port ) send_this_event = True for key in options: if key in data: if isinstance(options[key], (six.string_types, int)): if six.text_type(options[key]) != six.text_type(data[key]): send_this_event = False break elif isinstance(options[key], list): for opt in options[key]: if six.text_type(opt) == six.text_type(data[key]): break else: send_this_event = False break else: raise Exception( 'Arguments in config not specified properly') else: raise Exception( 'Please check the arguments given to junos engine in the\ configuration file') if send_this_event: if 'event' in data: topic = 'jnpr/syslog' for i in range(2, len(self.title)): topic += '/' + six.text_type(data[self.title[i]]) log.debug( 'Junos Syslog - sending this event on the bus: %s from %s', data, host ) result = {'send': True, 'data': data, 'topic': topic} return result else: raise Exception( 'The incoming event data could not be parsed properly.') else: result = {'send': False} return result
This function will parse the raw syslog data, dynamically create the topic according to the topic specified by the user (if specified) and decide whether to send the syslog data as an event on the master bus, based on the constraints given by the user. :param data: The raw syslog event data which is to be parsed. :param host: The IP of the host from where syslog is forwarded. :param port: Port of the junos device from which the data is sent :param options: kwargs provided by the user in the configuration file. :return: The result dictionary which contains the data and the topic, if the event is to be sent on the bus.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/engines/junos_syslog.py#L289-L350
null
class _SyslogServerFactory(DatagramProtocol): def __init__(self, options): self.options = options self.obj = _Parser() data = [ "hostip", "priority", "severity", "facility", "timestamp", "hostname", "daemon", "pid", "message", "event"] if 'topic' in self.options: # self.title = 'jnpr/syslog' # To remove the stray '/', if not removed splitting the topic # won't work properly. Eg: '/jnpr/syslog/event' won't be split # properly if the starting '/' is not stripped self.options['topic'] = options['topic'].strip('/') topics = options['topic'].split("/") self.title = topics if len(topics) < 2 or topics[0] != 'jnpr' or topics[1] != 'syslog': log.debug( 'The topic specified in configuration should start with \ "jnpr/syslog". Using the default topic.') self.title = ['jnpr', 'syslog', 'hostname', 'event'] else: for i in range(2, len(topics)): if topics[i] not in data: log.debug( 'Please check the topic specified. \ Only the following keywords can be specified \ in the topic: hostip, priority, severity, \ facility, timestamp, hostname, daemon, pid, \ message, event. Using the default topic.') self.title = ['jnpr', 'syslog', 'hostname', 'event'] break # We are done processing the topic. All other arguments are the # filters given by the user. While processing the filters we don't # explicitly ignore the 'topic', but delete it here itself. del self.options['topic'] else: self.title = ['jnpr', 'syslog', 'hostname', 'event'] def send_event_to_salt(self, result): ''' This function identifies whether the engine is running on the master or the minion and sends the data to the master event bus accordingly. :param result: It's a dictionary which has the final data and topic. ''' if result['send']: data = result['data'] topic = result['topic'] # If the engine is run on master, get the event bus and send the # parsed event. 
if __opts__['__role'] == 'master': event.get_master_event(__opts__, __opts__['sock_dir'] ).fire_event(data, topic) # If the engine is run on minion, use the fire_master execution # module to send event on the master bus. else: __salt__['event.fire_master'](data=data, tag=topic) def handle_error(self, err_msg): ''' Log the error messages. ''' log.error(err_msg.getErrorMessage) def datagramReceived(self, data, connection_details): (host, port) = connection_details d = threads.deferToThread( self.parseData, data, host, port, self.options) d.addCallbacks(self.send_event_to_salt, self.handle_error)
saltstack/salt
salt/engines/junos_syslog.py
_SyslogServerFactory.send_event_to_salt
python
def send_event_to_salt(self, result): ''' This function identifies whether the engine is running on the master or the minion and sends the data to the master event bus accordingly. :param result: It's a dictionary which has the final data and topic. ''' if result['send']: data = result['data'] topic = result['topic'] # If the engine is run on master, get the event bus and send the # parsed event. if __opts__['__role'] == 'master': event.get_master_event(__opts__, __opts__['sock_dir'] ).fire_event(data, topic) # If the engine is run on minion, use the fire_master execution # module to send event on the master bus. else: __salt__['event.fire_master'](data=data, tag=topic)
This function identifies whether the engine is running on the master or the minion and sends the data to the master event bus accordingly. :param result: It's a dictionary which has the final data and topic.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/engines/junos_syslog.py#L352-L372
[ "def get_master_event(opts, sock_dir, listen=True, io_loop=None, raise_errors=False, keep_loop=False):\n '''\n Return an event object suitable for the named transport\n '''\n # TODO: AIO core is separate from transport\n if opts['transport'] in ('zeromq', 'tcp', 'detect'):\n return MasterEvent(sock_dir, opts, listen=listen, io_loop=io_loop, raise_errors=raise_errors, keep_loop=keep_loop)\n", "def fire_event(self, data, tag, timeout=1000):\n '''\n Send a single event into the publisher with payload dict \"data\" and\n event identifier \"tag\"\n\n The default is 1000 ms\n '''\n if not six.text_type(tag): # no empty tags allowed\n raise ValueError('Empty tag.')\n\n if not isinstance(data, MutableMapping): # data must be dict\n raise ValueError(\n 'Dict object expected, not \\'{0}\\'.'.format(data)\n )\n\n if not self.cpush:\n if timeout is not None:\n timeout_s = float(timeout) / 1000\n else:\n timeout_s = None\n if not self.connect_pull(timeout=timeout_s):\n return False\n\n data['_stamp'] = datetime.datetime.utcnow().isoformat()\n\n tagend = TAGEND\n if six.PY2:\n dump_data = self.serial.dumps(data)\n else:\n # Since the pack / unpack logic here is for local events only,\n # it is safe to change the wire protocol. 
The mechanism\n # that sends events from minion to master is outside this\n # file.\n dump_data = self.serial.dumps(data, use_bin_type=True)\n\n serialized_data = salt.utils.dicttrim.trim_dict(\n dump_data,\n self.opts['max_event_size'],\n is_msgpacked=True,\n use_bin_type=six.PY3\n )\n log.debug('Sending event: tag = %s; data = %s', tag, data)\n event = b''.join([\n salt.utils.stringutils.to_bytes(tag),\n salt.utils.stringutils.to_bytes(tagend),\n serialized_data])\n msg = salt.utils.stringutils.to_bytes(event, 'utf-8')\n if self._run_io_loop_sync:\n with salt.utils.asynchronous.current_ioloop(self.io_loop):\n try:\n self.io_loop.run_sync(lambda: self.pusher.send(msg))\n except Exception as ex:\n log.debug(ex)\n raise\n else:\n self.io_loop.spawn_callback(self.pusher.send, msg)\n return True\n" ]
class _SyslogServerFactory(DatagramProtocol): def __init__(self, options): self.options = options self.obj = _Parser() data = [ "hostip", "priority", "severity", "facility", "timestamp", "hostname", "daemon", "pid", "message", "event"] if 'topic' in self.options: # self.title = 'jnpr/syslog' # To remove the stray '/', if not removed splitting the topic # won't work properly. Eg: '/jnpr/syslog/event' won't be split # properly if the starting '/' is not stripped self.options['topic'] = options['topic'].strip('/') topics = options['topic'].split("/") self.title = topics if len(topics) < 2 or topics[0] != 'jnpr' or topics[1] != 'syslog': log.debug( 'The topic specified in configuration should start with \ "jnpr/syslog". Using the default topic.') self.title = ['jnpr', 'syslog', 'hostname', 'event'] else: for i in range(2, len(topics)): if topics[i] not in data: log.debug( 'Please check the topic specified. \ Only the following keywords can be specified \ in the topic: hostip, priority, severity, \ facility, timestamp, hostname, daemon, pid, \ message, event. Using the default topic.') self.title = ['jnpr', 'syslog', 'hostname', 'event'] break # We are done processing the topic. All other arguments are the # filters given by the user. While processing the filters we don't # explicitly ignore the 'topic', but delete it here itself. del self.options['topic'] else: self.title = ['jnpr', 'syslog', 'hostname', 'event'] def parseData(self, data, host, port, options): ''' This function will parse the raw syslog data, dynamically create the topic according to the topic specified by the user (if specified) and decide whether to send the syslog data as an event on the master bus, based on the constraints given by the user. :param data: The raw syslog event data which is to be parsed. :param host: The IP of the host from where syslog is forwarded. 
:param port: Port of the junos device from which the data is sent :param options: kwargs provided by the user in the configuration file. :return: The result dictionary which contains the data and the topic, if the event is to be sent on the bus. ''' data = self.obj.parse(data) data['hostip'] = host log.debug( 'Junos Syslog - received %s from %s, sent from port %s', data, host, port ) send_this_event = True for key in options: if key in data: if isinstance(options[key], (six.string_types, int)): if six.text_type(options[key]) != six.text_type(data[key]): send_this_event = False break elif isinstance(options[key], list): for opt in options[key]: if six.text_type(opt) == six.text_type(data[key]): break else: send_this_event = False break else: raise Exception( 'Arguments in config not specified properly') else: raise Exception( 'Please check the arguments given to junos engine in the\ configuration file') if send_this_event: if 'event' in data: topic = 'jnpr/syslog' for i in range(2, len(self.title)): topic += '/' + six.text_type(data[self.title[i]]) log.debug( 'Junos Syslog - sending this event on the bus: %s from %s', data, host ) result = {'send': True, 'data': data, 'topic': topic} return result else: raise Exception( 'The incoming event data could not be parsed properly.') else: result = {'send': False} return result def handle_error(self, err_msg): ''' Log the error messages. ''' log.error(err_msg.getErrorMessage) def datagramReceived(self, data, connection_details): (host, port) = connection_details d = threads.deferToThread( self.parseData, data, host, port, self.options) d.addCallbacks(self.send_event_to_salt, self.handle_error)
saltstack/salt
salt/utils/sdb.py
sdb_get
python
def sdb_get(uri, opts, utils=None):
    '''
    Get a value from a db, using a uri in the form of
    ``sdb://<profile>/<key>``. If the uri provided does not start with
    ``sdb://``, then it will be returned as-is.
    '''
    # Anything that is not an sdb:// string passes through untouched.
    if not isinstance(uri, string_types) or not uri.startswith('sdb://'):
        return uri

    if utils is None:
        utils = salt.loader.utils(opts)

    prefix_len = len('sdb://')
    sep = uri.find('/', prefix_len)

    # A well-formed uri needs both a profile name and a non-empty key.
    if sep == -1 or not uri[sep + 1:]:
        return uri

    profile_name = uri[prefix_len:sep]
    # Look the profile up in the minion config first, then fall back to pillar.
    profile = opts.get(profile_name, {})
    if not profile:
        profile = opts.get('pillar', {}).get(profile_name, {})
    if 'driver' not in profile:
        return uri

    fun = '{0}.get'.format(profile['driver'])
    key = uri[sep + 1:]

    loaded_db = salt.loader.sdb(opts, fun, utils=utils)
    return loaded_db[fun](key, profile=profile)
Get a value from a db, using a uri in the form of ``sdb://<profile>/<key>``. If the uri provided does not start with ``sdb://``, then it will be returned as-is.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/sdb.py#L19-L46
null
# -*- coding: utf-8 -*- ''' Basic functions for accessing the SDB interface For configuration options, see the docs for specific sdb modules. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import random # Import salt libs import salt.loader from salt.ext.six import string_types from salt.ext.six.moves import range def sdb_set(uri, value, opts, utils=None): ''' Set a value in a db, using a uri in the form of ``sdb://<profile>/<key>``. If the uri provided does not start with ``sdb://`` or the value is not successfully set, return ``False``. ''' if not isinstance(uri, string_types) or not uri.startswith('sdb://'): return False if utils is None: utils = salt.loader.utils(opts) sdlen = len('sdb://') indx = uri.find('/', sdlen) if (indx == -1) or not uri[(indx + 1):]: return False profile = opts.get(uri[sdlen:indx], {}) if not profile: profile = opts.get('pillar', {}).get(uri[sdlen:indx], {}) if 'driver' not in profile: return False fun = '{0}.set'.format(profile['driver']) query = uri[indx+1:] loaded_db = salt.loader.sdb(opts, fun, utils=utils) return loaded_db[fun](query, value, profile=profile) def sdb_delete(uri, opts, utils=None): ''' Delete a value from a db, using a uri in the form of ``sdb://<profile>/<key>``. If the uri provided does not start with ``sdb://`` or the value is not successfully deleted, return ``False``. 
''' if not isinstance(uri, string_types) or not uri.startswith('sdb://'): return False if utils is None: utils = salt.loader.utils(opts) sdlen = len('sdb://') indx = uri.find('/', sdlen) if (indx == -1) or not uri[(indx + 1):]: return False profile = opts.get(uri[sdlen:indx], {}) if not profile: profile = opts.get('pillar', {}).get(uri[sdlen:indx], {}) if 'driver' not in profile: return False fun = '{0}.delete'.format(profile['driver']) query = uri[indx+1:] loaded_db = salt.loader.sdb(opts, fun, utils=utils) return loaded_db[fun](query, profile=profile) def sdb_get_or_set_hash(uri, opts, length=8, chars='abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)', utils=None): ''' Check if value exists in sdb. If it does, return, otherwise generate a random string and store it. This can be used for storing secrets in a centralized place. ''' if not isinstance(uri, string_types) or not uri.startswith('sdb://'): return False if utils is None: utils = salt.loader.utils(opts) ret = sdb_get(uri, opts, utils=utils) if ret is None: val = ''.join([random.SystemRandom().choice(chars) for _ in range(length)]) sdb_set(uri, val, opts, utils) return ret or val
saltstack/salt
salt/utils/sdb.py
sdb_get_or_set_hash
python
def sdb_get_or_set_hash(uri,
                        opts,
                        length=8,
                        chars='abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)',
                        utils=None):
    '''
    Check if value exists in sdb. If it does, return, otherwise generate a
    random string and store it. This can be used for storing secrets in a
    centralized place.
    '''
    if not isinstance(uri, string_types) or not uri.startswith('sdb://'):
        return False

    if utils is None:
        utils = salt.loader.utils(opts)

    ret = sdb_get(uri, opts, utils=utils)

    if ret is None:
        # No value stored yet: generate a random secret and persist it.
        # BUGFIX: the previous ``return ret or val`` evaluated ``val`` even
        # when ``ret`` was a falsy non-None value (e.g. ''), raising
        # NameError because ``val`` was only assigned in this branch.
        ret = ''.join(random.SystemRandom().choice(chars)
                      for _ in range(length))
        sdb_set(uri, ret, opts, utils)

    return ret
Check if value exists in sdb. If it does, return, otherwise generate a random string and store it. This can be used for storing secrets in a centralized place.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/sdb.py#L111-L133
[ "def sdb_get(uri, opts, utils=None):\n '''\n Get a value from a db, using a uri in the form of ``sdb://<profile>/<key>``. If\n the uri provided does not start with ``sdb://``, then it will be returned as-is.\n '''\n if not isinstance(uri, string_types) or not uri.startswith('sdb://'):\n return uri\n\n if utils is None:\n utils = salt.loader.utils(opts)\n\n sdlen = len('sdb://')\n indx = uri.find('/', sdlen)\n\n if (indx == -1) or not uri[(indx + 1):]:\n return uri\n\n profile = opts.get(uri[sdlen:indx], {})\n if not profile:\n profile = opts.get('pillar', {}).get(uri[sdlen:indx], {})\n if 'driver' not in profile:\n return uri\n\n fun = '{0}.get'.format(profile['driver'])\n query = uri[indx+1:]\n\n loaded_db = salt.loader.sdb(opts, fun, utils=utils)\n return loaded_db[fun](query, profile=profile)\n" ]
# -*- coding: utf-8 -*- ''' Basic functions for accessing the SDB interface For configuration options, see the docs for specific sdb modules. ''' from __future__ import absolute_import, print_function, unicode_literals # Import python libs import random # Import salt libs import salt.loader from salt.ext.six import string_types from salt.ext.six.moves import range def sdb_get(uri, opts, utils=None): ''' Get a value from a db, using a uri in the form of ``sdb://<profile>/<key>``. If the uri provided does not start with ``sdb://``, then it will be returned as-is. ''' if not isinstance(uri, string_types) or not uri.startswith('sdb://'): return uri if utils is None: utils = salt.loader.utils(opts) sdlen = len('sdb://') indx = uri.find('/', sdlen) if (indx == -1) or not uri[(indx + 1):]: return uri profile = opts.get(uri[sdlen:indx], {}) if not profile: profile = opts.get('pillar', {}).get(uri[sdlen:indx], {}) if 'driver' not in profile: return uri fun = '{0}.get'.format(profile['driver']) query = uri[indx+1:] loaded_db = salt.loader.sdb(opts, fun, utils=utils) return loaded_db[fun](query, profile=profile) def sdb_set(uri, value, opts, utils=None): ''' Set a value in a db, using a uri in the form of ``sdb://<profile>/<key>``. If the uri provided does not start with ``sdb://`` or the value is not successfully set, return ``False``. 
''' if not isinstance(uri, string_types) or not uri.startswith('sdb://'): return False if utils is None: utils = salt.loader.utils(opts) sdlen = len('sdb://') indx = uri.find('/', sdlen) if (indx == -1) or not uri[(indx + 1):]: return False profile = opts.get(uri[sdlen:indx], {}) if not profile: profile = opts.get('pillar', {}).get(uri[sdlen:indx], {}) if 'driver' not in profile: return False fun = '{0}.set'.format(profile['driver']) query = uri[indx+1:] loaded_db = salt.loader.sdb(opts, fun, utils=utils) return loaded_db[fun](query, value, profile=profile) def sdb_delete(uri, opts, utils=None): ''' Delete a value from a db, using a uri in the form of ``sdb://<profile>/<key>``. If the uri provided does not start with ``sdb://`` or the value is not successfully deleted, return ``False``. ''' if not isinstance(uri, string_types) or not uri.startswith('sdb://'): return False if utils is None: utils = salt.loader.utils(opts) sdlen = len('sdb://') indx = uri.find('/', sdlen) if (indx == -1) or not uri[(indx + 1):]: return False profile = opts.get(uri[sdlen:indx], {}) if not profile: profile = opts.get('pillar', {}).get(uri[sdlen:indx], {}) if 'driver' not in profile: return False fun = '{0}.delete'.format(profile['driver']) query = uri[indx+1:] loaded_db = salt.loader.sdb(opts, fun, utils=utils) return loaded_db[fun](query, profile=profile)
saltstack/salt
salt/modules/grafana4.py
get_users
python
def get_users(profile='grafana'):
    '''
    List all users.

    profile
        Configuration profile used to connect to the Grafana instance.
        Default is 'grafana'.

    CLI Example:

    .. code-block:: bash

        salt '*' grafana4.get_users
    '''
    # A profile given by name is resolved through the minion config.
    if isinstance(profile, string_types):
        profile = __salt__['config.option'](profile)
    url = '{0}/api/users'.format(profile['grafana_url'])
    resp = requests.get(
        url,
        auth=_get_auth(profile),
        headers=_get_headers(profile),
        timeout=profile.get('grafana_timeout', 3),
    )
    # Surface HTTP errors (4xx/5xx) as exceptions instead of bad JSON.
    if resp.status_code >= 400:
        resp.raise_for_status()
    return resp.json()
List all users. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_users
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/grafana4.py#L65-L89
[ "def _get_headers(profile):\n headers = {'Content-type': 'application/json'}\n if profile.get('grafana_token', False):\n headers['Authorization'] = 'Bearer {0}'.format(\n profile['grafana_token'])\n return headers\n", "def _get_auth(profile):\n if profile.get('grafana_token', False):\n return None\n return requests.auth.HTTPBasicAuth(\n profile['grafana_user'],\n profile['grafana_password']\n )\n" ]
# -*- coding: utf-8 -*- ''' Module for working with the Grafana v4 API .. versionadded:: 2017.7.0 :depends: requests :configuration: This module requires a configuration profile to be configured in the minion config, minion pillar, or master config. The module will use the 'grafana' key by default, if defined. For example: .. code-block:: yaml grafana: grafana_url: http://grafana.localhost grafana_user: admin grafana_password: admin grafana_timeout: 3 ''' from __future__ import absolute_import, print_function, unicode_literals try: import requests HAS_LIBS = True except ImportError: HAS_LIBS = False from salt.ext.six import string_types __virtualname__ = 'grafana4' def __virtual__(): ''' Only load if requests is installed ''' if HAS_LIBS: return __virtualname__ else: return False, 'The "{0}" module could not be loaded: ' \ '"requests" is not installed.'.format(__virtualname__) def _get_headers(profile): headers = {'Content-type': 'application/json'} if profile.get('grafana_token', False): headers['Authorization'] = 'Bearer {0}'.format( profile['grafana_token']) return headers def _get_auth(profile): if profile.get('grafana_token', False): return None return requests.auth.HTTPBasicAuth( profile['grafana_user'], profile['grafana_password'] ) def get_user(login, profile='grafana'): ''' Show a single user. login Login of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_user <login> ''' data = get_users(profile) for user in data: if user['login'] == login: return user return None def get_user_data(userid, profile='grafana'): ''' Get user data. userid Id of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.get_user_data <user_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/users/{1}'.format(profile['grafana_url'], userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def create_user(profile='grafana', **kwargs): ''' Create a new user. login Login of the new user. password Password of the new user. email Email of the new user. name Optional - Full name of the new user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.create_user login=<login> password=<password> email=<email> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.post( '{0}/api/admin/users'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_user(userid, profile='grafana', orgid=None, **kwargs): ''' Update an existing user. userid Id of the user. login Optional - Login of the user. email Optional - Email of the user. name Optional - Full name of the user. orgid Optional - Default Organization of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.update_user <user_id> login=<login> email=<email> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/users/{1}'.format(profile['grafana_url'], userid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() if orgid: response2 = requests.post( '{0}/api/users/{1}/using/{2}'.format(profile['grafana_url'], userid, orgid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response2.status_code >= 400: response2.raise_for_status() return response.json() def update_user_password(userid, profile='grafana', **kwargs): ''' Update a user password. userid Id of the user. password New password of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.update_user_password <user_id> password=<password> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/admin/users/{1}/password'.format( profile['grafana_url'], userid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_user_permissions(userid, profile='grafana', **kwargs): ''' Update a user password. userid Id of the user. isGrafanaAdmin Whether user is a Grafana admin. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.update_user_permissions <user_id> isGrafanaAdmin=<true|false> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/admin/users/{1}/permissions'.format( profile['grafana_url'], userid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def delete_user(userid, profile='grafana'): ''' Delete a user. userid Id of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.delete_user <user_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.delete( '{0}/api/admin/users/{1}'.format(profile['grafana_url'], userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_user_orgs(userid, profile='grafana'): ''' Get the list of organisations a user belong to. userid Id of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_user_orgs <user_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/users/{1}/orgs'.format(profile['grafana_url'], userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def delete_user_org(userid, orgid, profile='grafana'): ''' Remove a user from an organization. userid Id of the user. orgid Id of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. 
CLI Example: .. code-block:: bash salt '*' grafana4.delete_user_org <user_id> <org_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.delete( '{0}/api/orgs/{1}/users/{2}'.format( profile['grafana_url'], orgid, userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_orgs(profile='grafana'): ''' List all organizations. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_orgs ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/orgs'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_org(name, profile='grafana'): ''' Show a single organization. name Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_org <name> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/orgs/name/{1}'.format(profile['grafana_url'], name), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def switch_org(orgname, profile='grafana'): ''' Switch the current organization. name Name of the organization to switch to. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.switch_org <name> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) org = get_org(orgname, profile) response = requests.post( '{0}/api/user/using/{1}'.format(profile['grafana_url'], org['id']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return org def get_org_users(orgname=None, profile='grafana'): ''' Get the list of users that belong to the organization. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_org_users <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/org/users'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def create_org_user(orgname=None, profile='grafana', **kwargs): ''' Add user to the organization. loginOrEmail Login or email of the user. role Role of the user for this organization. Should be one of: - Admin - Editor - Read Only Editor - Viewer orgname Name of the organization in which users are added. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.create_org_user <orgname> loginOrEmail=<loginOrEmail> role=<role> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.post( '{0}/api/org/users'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_org_user(userid, orgname=None, profile='grafana', **kwargs): ''' Update user role in the organization. userid Id of the user. loginOrEmail Login or email of the user. role Role of the user for this organization. Should be one of: - Admin - Editor - Read Only Editor - Viewer orgname Name of the organization in which users are updated. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.update_org_user <user_id> <orgname> loginOrEmail=<loginOrEmail> role=<role> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.patch( '{0}/api/org/users/{1}'.format(profile['grafana_url'], userid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def delete_org_user(userid, orgname=None, profile='grafana'): ''' Remove user from the organization. userid Id of the user. orgname Name of the organization in which users are updated. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.delete_org_user <user_id> <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.delete( '{0}/api/org/users/{1}'.format(profile['grafana_url'], userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_org_address(orgname=None, profile='grafana'): ''' Get the organization address. orgname Name of the organization in which users are updated. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_org_address <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/org/address'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_org_address(orgname=None, profile='grafana', **kwargs): ''' Update the organization address. orgname Name of the organization in which users are updated. address1 Optional - address1 of the org. address2 Optional - address2 of the org. city Optional - city of the org. zip_code Optional - zip_code of the org. state Optional - state of the org. country Optional - country of the org. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.update_org_address <orgname> country=<country> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.put( '{0}/api/org/address'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_org_prefs(orgname=None, profile='grafana'): ''' Get the organization preferences. orgname Name of the organization in which users are updated. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_org_prefs <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/org/preferences'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_org_prefs(orgname=None, profile='grafana', **kwargs): ''' Update the organization preferences. orgname Name of the organization in which users are updated. theme Selected theme for the org. homeDashboardId Home dashboard for the org. timezone Timezone for the org (one of: "browser", "utc", or ""). profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.update_org_prefs <orgname> theme=<theme> timezone=<timezone> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.put( '{0}/api/org/preferences'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def create_org(profile='grafana', **kwargs): ''' Create a new organization. name Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.create_org <name> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.post( '{0}/api/orgs'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_org(orgid, profile='grafana', **kwargs): ''' Update an existing organization. orgid Id of the organization. name New name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.update_org <org_id> name=<name> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/orgs/{1}'.format(profile['grafana_url'], orgid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def delete_org(orgid, profile='grafana'): ''' Delete an organization. orgid Id of the organization. 
profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.delete_org <org_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.delete( '{0}/api/orgs/{1}'.format(profile['grafana_url'], orgid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_datasources(orgname=None, profile='grafana'): ''' List all datasources in an organisation. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_datasources <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/datasources'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_datasource(name, orgname=None, profile='grafana'): ''' Show a single datasource in an organisation. name Name of the datasource. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_datasource <name> <orgname> ''' data = get_datasources(orgname=orgname, profile=profile) for datasource in data: if datasource['name'] == name: return datasource return None def create_datasource(orgname=None, profile='grafana', **kwargs): ''' Create a new datasource in an organisation. name Name of the data source. type Type of the datasource ('graphite', 'influxdb' etc.). access Use proxy or direct. url The URL to the data source API. 
user Optional - user to authenticate with the data source. password Optional - password to authenticate with the data source. database Optional - database to use with the data source. basicAuth Optional - set to True to use HTTP basic auth to authenticate with the data source. basicAuthUser Optional - HTTP basic auth username. basicAuthPassword Optional - HTTP basic auth password. jsonData Optional - additional json data to post (eg. "timeInterval"). isDefault Optional - set data source as default. withCredentials Optional - Whether credentials such as cookies or auth headers should be sent with cross-site requests. typeLogoUrl Optional - Logo to use for this datasource. orgname Name of the organization in which the data source should be created. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.create_datasource ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.post( '{0}/api/datasources'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_datasource(datasourceid, orgname=None, profile='grafana', **kwargs): ''' Update a datasource. datasourceid Id of the datasource. name Name of the data source. type Type of the datasource ('graphite', 'influxdb' etc.). access Use proxy or direct. url The URL to the data source API. user Optional - user to authenticate with the data source. password Optional - password to authenticate with the data source. database Optional - database to use with the data source. basicAuth Optional - set to True to use HTTP basic auth to authenticate with the data source. basicAuthUser Optional - HTTP basic auth username. 
basicAuthPassword Optional - HTTP basic auth password. jsonData Optional - additional json data to post (eg. "timeInterval"). isDefault Optional - set data source as default. withCredentials Optional - Whether credentials such as cookies or auth headers should be sent with cross-site requests. typeLogoUrl Optional - Logo to use for this datasource. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.update_datasource <datasourceid> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/datasources/{1}'.format(profile['grafana_url'], datasourceid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() # temporary fix for https://github.com/grafana/grafana/issues/6869 #return response.json() return {} def delete_datasource(datasourceid, orgname=None, profile='grafana'): ''' Delete a datasource. datasourceid Id of the datasource. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.delete_datasource <datasource_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.delete( '{0}/api/datasources/{1}'.format(profile['grafana_url'], datasourceid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_dashboard(slug, orgname=None, profile='grafana'): ''' Get a dashboard. slug Slug (name) of the dashboard. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.get_dashboard <slug> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/dashboards/db/{1}'.format(profile['grafana_url'], slug), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) data = response.json() if response.status_code == 404: return None if response.status_code >= 400: response.raise_for_status() return data.get('dashboard') def delete_dashboard(slug, orgname=None, profile='grafana'): ''' Delete a dashboard. slug Slug (name) of the dashboard. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.delete_dashboard <slug> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.delete( '{0}/api/dashboards/db/{1}'.format(profile['grafana_url'], slug), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def create_update_dashboard(orgname=None, profile='grafana', **kwargs): ''' Create or update a dashboard. dashboard A dict that defines the dashboard to create/update. overwrite Whether the dashboard should be overwritten if already existing. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.create_update_dashboard dashboard=<dashboard> overwrite=True orgname=<orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.post( "{0}/api/dashboards/db".format(profile.get('grafana_url')), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json()
saltstack/salt
salt/modules/grafana4.py
get_user
python
def get_user(login, profile='grafana'): ''' Show a single user. login Login of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_user <login> ''' data = get_users(profile) for user in data: if user['login'] == login: return user return None
Show a single user. login Login of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_user <login>
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/grafana4.py#L92-L113
[ "def get_users(profile='grafana'):\n '''\n List all users.\n\n profile\n Configuration profile used to connect to the Grafana instance.\n Default is 'grafana'.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' grafana4.get_users\n '''\n if isinstance(profile, string_types):\n profile = __salt__['config.option'](profile)\n response = requests.get(\n '{0}/api/users'.format(profile['grafana_url']),\n auth=_get_auth(profile),\n headers=_get_headers(profile),\n timeout=profile.get('grafana_timeout', 3),\n )\n if response.status_code >= 400:\n response.raise_for_status()\n return response.json()\n" ]
# -*- coding: utf-8 -*- ''' Module for working with the Grafana v4 API .. versionadded:: 2017.7.0 :depends: requests :configuration: This module requires a configuration profile to be configured in the minion config, minion pillar, or master config. The module will use the 'grafana' key by default, if defined. For example: .. code-block:: yaml grafana: grafana_url: http://grafana.localhost grafana_user: admin grafana_password: admin grafana_timeout: 3 ''' from __future__ import absolute_import, print_function, unicode_literals try: import requests HAS_LIBS = True except ImportError: HAS_LIBS = False from salt.ext.six import string_types __virtualname__ = 'grafana4' def __virtual__(): ''' Only load if requests is installed ''' if HAS_LIBS: return __virtualname__ else: return False, 'The "{0}" module could not be loaded: ' \ '"requests" is not installed.'.format(__virtualname__) def _get_headers(profile): headers = {'Content-type': 'application/json'} if profile.get('grafana_token', False): headers['Authorization'] = 'Bearer {0}'.format( profile['grafana_token']) return headers def _get_auth(profile): if profile.get('grafana_token', False): return None return requests.auth.HTTPBasicAuth( profile['grafana_user'], profile['grafana_password'] ) def get_users(profile='grafana'): ''' List all users. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_users ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/users'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_user_data(userid, profile='grafana'): ''' Get user data. userid Id of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. 
CLI Example: .. code-block:: bash salt '*' grafana4.get_user_data <user_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/users/{1}'.format(profile['grafana_url'], userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def create_user(profile='grafana', **kwargs): ''' Create a new user. login Login of the new user. password Password of the new user. email Email of the new user. name Optional - Full name of the new user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.create_user login=<login> password=<password> email=<email> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.post( '{0}/api/admin/users'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_user(userid, profile='grafana', orgid=None, **kwargs): ''' Update an existing user. userid Id of the user. login Optional - Login of the user. email Optional - Email of the user. name Optional - Full name of the user. orgid Optional - Default Organization of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.update_user <user_id> login=<login> email=<email> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/users/{1}'.format(profile['grafana_url'], userid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() if orgid: response2 = requests.post( '{0}/api/users/{1}/using/{2}'.format(profile['grafana_url'], userid, orgid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response2.status_code >= 400: response2.raise_for_status() return response.json() def update_user_password(userid, profile='grafana', **kwargs): ''' Update a user password. userid Id of the user. password New password of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.update_user_password <user_id> password=<password> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/admin/users/{1}/password'.format( profile['grafana_url'], userid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_user_permissions(userid, profile='grafana', **kwargs): ''' Update a user password. userid Id of the user. isGrafanaAdmin Whether user is a Grafana admin. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.update_user_permissions <user_id> isGrafanaAdmin=<true|false> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/admin/users/{1}/permissions'.format( profile['grafana_url'], userid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def delete_user(userid, profile='grafana'): ''' Delete a user. userid Id of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.delete_user <user_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.delete( '{0}/api/admin/users/{1}'.format(profile['grafana_url'], userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_user_orgs(userid, profile='grafana'): ''' Get the list of organisations a user belong to. userid Id of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_user_orgs <user_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/users/{1}/orgs'.format(profile['grafana_url'], userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def delete_user_org(userid, orgid, profile='grafana'): ''' Remove a user from an organization. userid Id of the user. orgid Id of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. 
CLI Example: .. code-block:: bash salt '*' grafana4.delete_user_org <user_id> <org_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.delete( '{0}/api/orgs/{1}/users/{2}'.format( profile['grafana_url'], orgid, userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_orgs(profile='grafana'): ''' List all organizations. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_orgs ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/orgs'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_org(name, profile='grafana'): ''' Show a single organization. name Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_org <name> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/orgs/name/{1}'.format(profile['grafana_url'], name), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def switch_org(orgname, profile='grafana'): ''' Switch the current organization. name Name of the organization to switch to. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.switch_org <name> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) org = get_org(orgname, profile) response = requests.post( '{0}/api/user/using/{1}'.format(profile['grafana_url'], org['id']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return org def get_org_users(orgname=None, profile='grafana'): ''' Get the list of users that belong to the organization. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_org_users <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/org/users'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def create_org_user(orgname=None, profile='grafana', **kwargs): ''' Add user to the organization. loginOrEmail Login or email of the user. role Role of the user for this organization. Should be one of: - Admin - Editor - Read Only Editor - Viewer orgname Name of the organization in which users are added. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.create_org_user <orgname> loginOrEmail=<loginOrEmail> role=<role> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.post( '{0}/api/org/users'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_org_user(userid, orgname=None, profile='grafana', **kwargs): ''' Update user role in the organization. userid Id of the user. loginOrEmail Login or email of the user. role Role of the user for this organization. Should be one of: - Admin - Editor - Read Only Editor - Viewer orgname Name of the organization in which users are updated. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.update_org_user <user_id> <orgname> loginOrEmail=<loginOrEmail> role=<role> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.patch( '{0}/api/org/users/{1}'.format(profile['grafana_url'], userid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def delete_org_user(userid, orgname=None, profile='grafana'): ''' Remove user from the organization. userid Id of the user. orgname Name of the organization in which users are updated. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.delete_org_user <user_id> <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.delete( '{0}/api/org/users/{1}'.format(profile['grafana_url'], userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_org_address(orgname=None, profile='grafana'): ''' Get the organization address. orgname Name of the organization in which users are updated. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_org_address <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/org/address'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_org_address(orgname=None, profile='grafana', **kwargs): ''' Update the organization address. orgname Name of the organization in which users are updated. address1 Optional - address1 of the org. address2 Optional - address2 of the org. city Optional - city of the org. zip_code Optional - zip_code of the org. state Optional - state of the org. country Optional - country of the org. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.update_org_address <orgname> country=<country> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.put( '{0}/api/org/address'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_org_prefs(orgname=None, profile='grafana'): ''' Get the organization preferences. orgname Name of the organization in which users are updated. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_org_prefs <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/org/preferences'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_org_prefs(orgname=None, profile='grafana', **kwargs): ''' Update the organization preferences. orgname Name of the organization in which users are updated. theme Selected theme for the org. homeDashboardId Home dashboard for the org. timezone Timezone for the org (one of: "browser", "utc", or ""). profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.update_org_prefs <orgname> theme=<theme> timezone=<timezone> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.put( '{0}/api/org/preferences'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def create_org(profile='grafana', **kwargs): ''' Create a new organization. name Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.create_org <name> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.post( '{0}/api/orgs'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_org(orgid, profile='grafana', **kwargs): ''' Update an existing organization. orgid Id of the organization. name New name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.update_org <org_id> name=<name> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/orgs/{1}'.format(profile['grafana_url'], orgid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def delete_org(orgid, profile='grafana'): ''' Delete an organization. orgid Id of the organization. 
profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.delete_org <org_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.delete( '{0}/api/orgs/{1}'.format(profile['grafana_url'], orgid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_datasources(orgname=None, profile='grafana'): ''' List all datasources in an organisation. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_datasources <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/datasources'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_datasource(name, orgname=None, profile='grafana'): ''' Show a single datasource in an organisation. name Name of the datasource. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_datasource <name> <orgname> ''' data = get_datasources(orgname=orgname, profile=profile) for datasource in data: if datasource['name'] == name: return datasource return None def create_datasource(orgname=None, profile='grafana', **kwargs): ''' Create a new datasource in an organisation. name Name of the data source. type Type of the datasource ('graphite', 'influxdb' etc.). access Use proxy or direct. url The URL to the data source API. 
user Optional - user to authenticate with the data source. password Optional - password to authenticate with the data source. database Optional - database to use with the data source. basicAuth Optional - set to True to use HTTP basic auth to authenticate with the data source. basicAuthUser Optional - HTTP basic auth username. basicAuthPassword Optional - HTTP basic auth password. jsonData Optional - additional json data to post (eg. "timeInterval"). isDefault Optional - set data source as default. withCredentials Optional - Whether credentials such as cookies or auth headers should be sent with cross-site requests. typeLogoUrl Optional - Logo to use for this datasource. orgname Name of the organization in which the data source should be created. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.create_datasource ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.post( '{0}/api/datasources'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_datasource(datasourceid, orgname=None, profile='grafana', **kwargs): ''' Update a datasource. datasourceid Id of the datasource. name Name of the data source. type Type of the datasource ('graphite', 'influxdb' etc.). access Use proxy or direct. url The URL to the data source API. user Optional - user to authenticate with the data source. password Optional - password to authenticate with the data source. database Optional - database to use with the data source. basicAuth Optional - set to True to use HTTP basic auth to authenticate with the data source. basicAuthUser Optional - HTTP basic auth username. 
basicAuthPassword Optional - HTTP basic auth password. jsonData Optional - additional json data to post (eg. "timeInterval"). isDefault Optional - set data source as default. withCredentials Optional - Whether credentials such as cookies or auth headers should be sent with cross-site requests. typeLogoUrl Optional - Logo to use for this datasource. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.update_datasource <datasourceid> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/datasources/{1}'.format(profile['grafana_url'], datasourceid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() # temporary fix for https://github.com/grafana/grafana/issues/6869 #return response.json() return {} def delete_datasource(datasourceid, orgname=None, profile='grafana'): ''' Delete a datasource. datasourceid Id of the datasource. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.delete_datasource <datasource_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.delete( '{0}/api/datasources/{1}'.format(profile['grafana_url'], datasourceid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_dashboard(slug, orgname=None, profile='grafana'): ''' Get a dashboard. slug Slug (name) of the dashboard. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.get_dashboard <slug> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/dashboards/db/{1}'.format(profile['grafana_url'], slug), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) data = response.json() if response.status_code == 404: return None if response.status_code >= 400: response.raise_for_status() return data.get('dashboard') def delete_dashboard(slug, orgname=None, profile='grafana'): ''' Delete a dashboard. slug Slug (name) of the dashboard. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.delete_dashboard <slug> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.delete( '{0}/api/dashboards/db/{1}'.format(profile['grafana_url'], slug), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def create_update_dashboard(orgname=None, profile='grafana', **kwargs): ''' Create or update a dashboard. dashboard A dict that defines the dashboard to create/update. overwrite Whether the dashboard should be overwritten if already existing. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.create_update_dashboard dashboard=<dashboard> overwrite=True orgname=<orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.post( "{0}/api/dashboards/db".format(profile.get('grafana_url')), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json()
saltstack/salt
salt/modules/grafana4.py
update_user
python
def update_user(userid, profile='grafana', orgid=None, **kwargs): ''' Update an existing user. userid Id of the user. login Optional - Login of the user. email Optional - Email of the user. name Optional - Full name of the user. orgid Optional - Default Organization of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.update_user <user_id> login=<login> email=<email> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/users/{1}'.format(profile['grafana_url'], userid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() if orgid: response2 = requests.post( '{0}/api/users/{1}/using/{2}'.format(profile['grafana_url'], userid, orgid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response2.status_code >= 400: response2.raise_for_status() return response.json()
Update an existing user. userid Id of the user. login Optional - Login of the user. email Optional - Email of the user. name Optional - Full name of the user. orgid Optional - Default Organization of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.update_user <user_id> login=<login> email=<email>
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/grafana4.py#L186-L235
[ "def _get_headers(profile):\n headers = {'Content-type': 'application/json'}\n if profile.get('grafana_token', False):\n headers['Authorization'] = 'Bearer {0}'.format(\n profile['grafana_token'])\n return headers\n", "def _get_auth(profile):\n if profile.get('grafana_token', False):\n return None\n return requests.auth.HTTPBasicAuth(\n profile['grafana_user'],\n profile['grafana_password']\n )\n" ]
# -*- coding: utf-8 -*- ''' Module for working with the Grafana v4 API .. versionadded:: 2017.7.0 :depends: requests :configuration: This module requires a configuration profile to be configured in the minion config, minion pillar, or master config. The module will use the 'grafana' key by default, if defined. For example: .. code-block:: yaml grafana: grafana_url: http://grafana.localhost grafana_user: admin grafana_password: admin grafana_timeout: 3 ''' from __future__ import absolute_import, print_function, unicode_literals try: import requests HAS_LIBS = True except ImportError: HAS_LIBS = False from salt.ext.six import string_types __virtualname__ = 'grafana4' def __virtual__(): ''' Only load if requests is installed ''' if HAS_LIBS: return __virtualname__ else: return False, 'The "{0}" module could not be loaded: ' \ '"requests" is not installed.'.format(__virtualname__) def _get_headers(profile): headers = {'Content-type': 'application/json'} if profile.get('grafana_token', False): headers['Authorization'] = 'Bearer {0}'.format( profile['grafana_token']) return headers def _get_auth(profile): if profile.get('grafana_token', False): return None return requests.auth.HTTPBasicAuth( profile['grafana_user'], profile['grafana_password'] ) def get_users(profile='grafana'): ''' List all users. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_users ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/users'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_user(login, profile='grafana'): ''' Show a single user. login Login of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. 
CLI Example: .. code-block:: bash salt '*' grafana4.get_user <login> ''' data = get_users(profile) for user in data: if user['login'] == login: return user return None def get_user_data(userid, profile='grafana'): ''' Get user data. userid Id of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_user_data <user_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/users/{1}'.format(profile['grafana_url'], userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def create_user(profile='grafana', **kwargs): ''' Create a new user. login Login of the new user. password Password of the new user. email Email of the new user. name Optional - Full name of the new user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.create_user login=<login> password=<password> email=<email> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.post( '{0}/api/admin/users'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_user_password(userid, profile='grafana', **kwargs): ''' Update a user password. userid Id of the user. password New password of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.update_user_password <user_id> password=<password> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/admin/users/{1}/password'.format( profile['grafana_url'], userid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_user_permissions(userid, profile='grafana', **kwargs): ''' Update a user password. userid Id of the user. isGrafanaAdmin Whether user is a Grafana admin. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.update_user_permissions <user_id> isGrafanaAdmin=<true|false> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/admin/users/{1}/permissions'.format( profile['grafana_url'], userid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def delete_user(userid, profile='grafana'): ''' Delete a user. userid Id of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.delete_user <user_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.delete( '{0}/api/admin/users/{1}'.format(profile['grafana_url'], userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_user_orgs(userid, profile='grafana'): ''' Get the list of organisations a user belong to. userid Id of the user. 
profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_user_orgs <user_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/users/{1}/orgs'.format(profile['grafana_url'], userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def delete_user_org(userid, orgid, profile='grafana'): ''' Remove a user from an organization. userid Id of the user. orgid Id of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.delete_user_org <user_id> <org_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.delete( '{0}/api/orgs/{1}/users/{2}'.format( profile['grafana_url'], orgid, userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_orgs(profile='grafana'): ''' List all organizations. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_orgs ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/orgs'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_org(name, profile='grafana'): ''' Show a single organization. name Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. 
CLI Example: .. code-block:: bash salt '*' grafana4.get_org <name> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/orgs/name/{1}'.format(profile['grafana_url'], name), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def switch_org(orgname, profile='grafana'): ''' Switch the current organization. name Name of the organization to switch to. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.switch_org <name> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) org = get_org(orgname, profile) response = requests.post( '{0}/api/user/using/{1}'.format(profile['grafana_url'], org['id']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return org def get_org_users(orgname=None, profile='grafana'): ''' Get the list of users that belong to the organization. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_org_users <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/org/users'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def create_org_user(orgname=None, profile='grafana', **kwargs): ''' Add user to the organization. loginOrEmail Login or email of the user. role Role of the user for this organization. 
Should be one of: - Admin - Editor - Read Only Editor - Viewer orgname Name of the organization in which users are added. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.create_org_user <orgname> loginOrEmail=<loginOrEmail> role=<role> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.post( '{0}/api/org/users'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_org_user(userid, orgname=None, profile='grafana', **kwargs): ''' Update user role in the organization. userid Id of the user. loginOrEmail Login or email of the user. role Role of the user for this organization. Should be one of: - Admin - Editor - Read Only Editor - Viewer orgname Name of the organization in which users are updated. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.update_org_user <user_id> <orgname> loginOrEmail=<loginOrEmail> role=<role> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.patch( '{0}/api/org/users/{1}'.format(profile['grafana_url'], userid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def delete_org_user(userid, orgname=None, profile='grafana'): ''' Remove user from the organization. userid Id of the user. orgname Name of the organization in which users are updated. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. 
CLI Example: .. code-block:: bash salt '*' grafana4.delete_org_user <user_id> <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.delete( '{0}/api/org/users/{1}'.format(profile['grafana_url'], userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_org_address(orgname=None, profile='grafana'): ''' Get the organization address. orgname Name of the organization in which users are updated. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_org_address <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/org/address'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_org_address(orgname=None, profile='grafana', **kwargs): ''' Update the organization address. orgname Name of the organization in which users are updated. address1 Optional - address1 of the org. address2 Optional - address2 of the org. city Optional - city of the org. zip_code Optional - zip_code of the org. state Optional - state of the org. country Optional - country of the org. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.update_org_address <orgname> country=<country> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.put( '{0}/api/org/address'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_org_prefs(orgname=None, profile='grafana'): ''' Get the organization preferences. orgname Name of the organization in which users are updated. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_org_prefs <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/org/preferences'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_org_prefs(orgname=None, profile='grafana', **kwargs): ''' Update the organization preferences. orgname Name of the organization in which users are updated. theme Selected theme for the org. homeDashboardId Home dashboard for the org. timezone Timezone for the org (one of: "browser", "utc", or ""). profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.update_org_prefs <orgname> theme=<theme> timezone=<timezone> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.put( '{0}/api/org/preferences'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def create_org(profile='grafana', **kwargs): ''' Create a new organization. name Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.create_org <name> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.post( '{0}/api/orgs'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_org(orgid, profile='grafana', **kwargs): ''' Update an existing organization. orgid Id of the organization. name New name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.update_org <org_id> name=<name> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/orgs/{1}'.format(profile['grafana_url'], orgid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def delete_org(orgid, profile='grafana'): ''' Delete an organization. orgid Id of the organization. 
profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.delete_org <org_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.delete( '{0}/api/orgs/{1}'.format(profile['grafana_url'], orgid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_datasources(orgname=None, profile='grafana'): ''' List all datasources in an organisation. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_datasources <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/datasources'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_datasource(name, orgname=None, profile='grafana'): ''' Show a single datasource in an organisation. name Name of the datasource. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_datasource <name> <orgname> ''' data = get_datasources(orgname=orgname, profile=profile) for datasource in data: if datasource['name'] == name: return datasource return None def create_datasource(orgname=None, profile='grafana', **kwargs): ''' Create a new datasource in an organisation. name Name of the data source. type Type of the datasource ('graphite', 'influxdb' etc.). access Use proxy or direct. url The URL to the data source API. 
user Optional - user to authenticate with the data source. password Optional - password to authenticate with the data source. database Optional - database to use with the data source. basicAuth Optional - set to True to use HTTP basic auth to authenticate with the data source. basicAuthUser Optional - HTTP basic auth username. basicAuthPassword Optional - HTTP basic auth password. jsonData Optional - additional json data to post (eg. "timeInterval"). isDefault Optional - set data source as default. withCredentials Optional - Whether credentials such as cookies or auth headers should be sent with cross-site requests. typeLogoUrl Optional - Logo to use for this datasource. orgname Name of the organization in which the data source should be created. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.create_datasource ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.post( '{0}/api/datasources'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_datasource(datasourceid, orgname=None, profile='grafana', **kwargs): ''' Update a datasource. datasourceid Id of the datasource. name Name of the data source. type Type of the datasource ('graphite', 'influxdb' etc.). access Use proxy or direct. url The URL to the data source API. user Optional - user to authenticate with the data source. password Optional - password to authenticate with the data source. database Optional - database to use with the data source. basicAuth Optional - set to True to use HTTP basic auth to authenticate with the data source. basicAuthUser Optional - HTTP basic auth username. 
basicAuthPassword Optional - HTTP basic auth password. jsonData Optional - additional json data to post (eg. "timeInterval"). isDefault Optional - set data source as default. withCredentials Optional - Whether credentials such as cookies or auth headers should be sent with cross-site requests. typeLogoUrl Optional - Logo to use for this datasource. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.update_datasource <datasourceid> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/datasources/{1}'.format(profile['grafana_url'], datasourceid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() # temporary fix for https://github.com/grafana/grafana/issues/6869 #return response.json() return {} def delete_datasource(datasourceid, orgname=None, profile='grafana'): ''' Delete a datasource. datasourceid Id of the datasource. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.delete_datasource <datasource_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.delete( '{0}/api/datasources/{1}'.format(profile['grafana_url'], datasourceid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_dashboard(slug, orgname=None, profile='grafana'): ''' Get a dashboard. slug Slug (name) of the dashboard. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.get_dashboard <slug> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/dashboards/db/{1}'.format(profile['grafana_url'], slug), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) data = response.json() if response.status_code == 404: return None if response.status_code >= 400: response.raise_for_status() return data.get('dashboard') def delete_dashboard(slug, orgname=None, profile='grafana'): ''' Delete a dashboard. slug Slug (name) of the dashboard. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.delete_dashboard <slug> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.delete( '{0}/api/dashboards/db/{1}'.format(profile['grafana_url'], slug), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def create_update_dashboard(orgname=None, profile='grafana', **kwargs): ''' Create or update a dashboard. dashboard A dict that defines the dashboard to create/update. overwrite Whether the dashboard should be overwritten if already existing. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.create_update_dashboard dashboard=<dashboard> overwrite=True orgname=<orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.post( "{0}/api/dashboards/db".format(profile.get('grafana_url')), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json()
saltstack/salt
salt/modules/grafana4.py
switch_org
python
def switch_org(orgname, profile='grafana'): ''' Switch the current organization. name Name of the organization to switch to. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.switch_org <name> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) org = get_org(orgname, profile) response = requests.post( '{0}/api/user/using/{1}'.format(profile['grafana_url'], org['id']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return org
Switch the current organization. name Name of the organization to switch to. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.switch_org <name>
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/grafana4.py#L459-L487
[ "def _get_headers(profile):\n headers = {'Content-type': 'application/json'}\n if profile.get('grafana_token', False):\n headers['Authorization'] = 'Bearer {0}'.format(\n profile['grafana_token'])\n return headers\n", "def _get_auth(profile):\n if profile.get('grafana_token', False):\n return None\n return requests.auth.HTTPBasicAuth(\n profile['grafana_user'],\n profile['grafana_password']\n )\n", "def get_org(name, profile='grafana'):\n '''\n Show a single organization.\n\n name\n Name of the organization.\n\n profile\n Configuration profile used to connect to the Grafana instance.\n Default is 'grafana'.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' grafana4.get_org <name>\n '''\n if isinstance(profile, string_types):\n profile = __salt__['config.option'](profile)\n response = requests.get(\n '{0}/api/orgs/name/{1}'.format(profile['grafana_url'], name),\n auth=_get_auth(profile),\n headers=_get_headers(profile),\n timeout=profile.get('grafana_timeout', 3),\n )\n if response.status_code >= 400:\n response.raise_for_status()\n return response.json()\n" ]
# -*- coding: utf-8 -*- ''' Module for working with the Grafana v4 API .. versionadded:: 2017.7.0 :depends: requests :configuration: This module requires a configuration profile to be configured in the minion config, minion pillar, or master config. The module will use the 'grafana' key by default, if defined. For example: .. code-block:: yaml grafana: grafana_url: http://grafana.localhost grafana_user: admin grafana_password: admin grafana_timeout: 3 ''' from __future__ import absolute_import, print_function, unicode_literals try: import requests HAS_LIBS = True except ImportError: HAS_LIBS = False from salt.ext.six import string_types __virtualname__ = 'grafana4' def __virtual__(): ''' Only load if requests is installed ''' if HAS_LIBS: return __virtualname__ else: return False, 'The "{0}" module could not be loaded: ' \ '"requests" is not installed.'.format(__virtualname__) def _get_headers(profile): headers = {'Content-type': 'application/json'} if profile.get('grafana_token', False): headers['Authorization'] = 'Bearer {0}'.format( profile['grafana_token']) return headers def _get_auth(profile): if profile.get('grafana_token', False): return None return requests.auth.HTTPBasicAuth( profile['grafana_user'], profile['grafana_password'] ) def get_users(profile='grafana'): ''' List all users. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_users ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/users'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_user(login, profile='grafana'): ''' Show a single user. login Login of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. 
CLI Example: .. code-block:: bash salt '*' grafana4.get_user <login> ''' data = get_users(profile) for user in data: if user['login'] == login: return user return None def get_user_data(userid, profile='grafana'): ''' Get user data. userid Id of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_user_data <user_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/users/{1}'.format(profile['grafana_url'], userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def create_user(profile='grafana', **kwargs): ''' Create a new user. login Login of the new user. password Password of the new user. email Email of the new user. name Optional - Full name of the new user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.create_user login=<login> password=<password> email=<email> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.post( '{0}/api/admin/users'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_user(userid, profile='grafana', orgid=None, **kwargs): ''' Update an existing user. userid Id of the user. login Optional - Login of the user. email Optional - Email of the user. name Optional - Full name of the user. orgid Optional - Default Organization of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.update_user <user_id> login=<login> email=<email> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/users/{1}'.format(profile['grafana_url'], userid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() if orgid: response2 = requests.post( '{0}/api/users/{1}/using/{2}'.format(profile['grafana_url'], userid, orgid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response2.status_code >= 400: response2.raise_for_status() return response.json() def update_user_password(userid, profile='grafana', **kwargs): ''' Update a user password. userid Id of the user. password New password of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.update_user_password <user_id> password=<password> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/admin/users/{1}/password'.format( profile['grafana_url'], userid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_user_permissions(userid, profile='grafana', **kwargs): ''' Update a user password. userid Id of the user. isGrafanaAdmin Whether user is a Grafana admin. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.update_user_permissions <user_id> isGrafanaAdmin=<true|false> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/admin/users/{1}/permissions'.format( profile['grafana_url'], userid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def delete_user(userid, profile='grafana'): ''' Delete a user. userid Id of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.delete_user <user_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.delete( '{0}/api/admin/users/{1}'.format(profile['grafana_url'], userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_user_orgs(userid, profile='grafana'): ''' Get the list of organisations a user belong to. userid Id of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_user_orgs <user_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/users/{1}/orgs'.format(profile['grafana_url'], userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def delete_user_org(userid, orgid, profile='grafana'): ''' Remove a user from an organization. userid Id of the user. orgid Id of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. 
CLI Example: .. code-block:: bash salt '*' grafana4.delete_user_org <user_id> <org_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.delete( '{0}/api/orgs/{1}/users/{2}'.format( profile['grafana_url'], orgid, userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_orgs(profile='grafana'): ''' List all organizations. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_orgs ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/orgs'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_org(name, profile='grafana'): ''' Show a single organization. name Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_org <name> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/orgs/name/{1}'.format(profile['grafana_url'], name), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_org_users(orgname=None, profile='grafana'): ''' Get the list of users that belong to the organization. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.get_org_users <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/org/users'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def create_org_user(orgname=None, profile='grafana', **kwargs): ''' Add user to the organization. loginOrEmail Login or email of the user. role Role of the user for this organization. Should be one of: - Admin - Editor - Read Only Editor - Viewer orgname Name of the organization in which users are added. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.create_org_user <orgname> loginOrEmail=<loginOrEmail> role=<role> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.post( '{0}/api/org/users'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_org_user(userid, orgname=None, profile='grafana', **kwargs): ''' Update user role in the organization. userid Id of the user. loginOrEmail Login or email of the user. role Role of the user for this organization. Should be one of: - Admin - Editor - Read Only Editor - Viewer orgname Name of the organization in which users are updated. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.update_org_user <user_id> <orgname> loginOrEmail=<loginOrEmail> role=<role> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.patch( '{0}/api/org/users/{1}'.format(profile['grafana_url'], userid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def delete_org_user(userid, orgname=None, profile='grafana'): ''' Remove user from the organization. userid Id of the user. orgname Name of the organization in which users are updated. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.delete_org_user <user_id> <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.delete( '{0}/api/org/users/{1}'.format(profile['grafana_url'], userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_org_address(orgname=None, profile='grafana'): ''' Get the organization address. orgname Name of the organization in which users are updated. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.get_org_address <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/org/address'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_org_address(orgname=None, profile='grafana', **kwargs): ''' Update the organization address. orgname Name of the organization in which users are updated. address1 Optional - address1 of the org. address2 Optional - address2 of the org. city Optional - city of the org. zip_code Optional - zip_code of the org. state Optional - state of the org. country Optional - country of the org. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.update_org_address <orgname> country=<country> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.put( '{0}/api/org/address'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_org_prefs(orgname=None, profile='grafana'): ''' Get the organization preferences. orgname Name of the organization in which users are updated. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.get_org_prefs <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/org/preferences'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_org_prefs(orgname=None, profile='grafana', **kwargs): ''' Update the organization preferences. orgname Name of the organization in which users are updated. theme Selected theme for the org. homeDashboardId Home dashboard for the org. timezone Timezone for the org (one of: "browser", "utc", or ""). profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.update_org_prefs <orgname> theme=<theme> timezone=<timezone> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.put( '{0}/api/org/preferences'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def create_org(profile='grafana', **kwargs): ''' Create a new organization. name Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.create_org <name> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.post( '{0}/api/orgs'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_org(orgid, profile='grafana', **kwargs): ''' Update an existing organization. orgid Id of the organization. name New name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.update_org <org_id> name=<name> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/orgs/{1}'.format(profile['grafana_url'], orgid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def delete_org(orgid, profile='grafana'): ''' Delete an organization. orgid Id of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.delete_org <org_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.delete( '{0}/api/orgs/{1}'.format(profile['grafana_url'], orgid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_datasources(orgname=None, profile='grafana'): ''' List all datasources in an organisation. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.get_datasources <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/datasources'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_datasource(name, orgname=None, profile='grafana'): ''' Show a single datasource in an organisation. name Name of the datasource. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_datasource <name> <orgname> ''' data = get_datasources(orgname=orgname, profile=profile) for datasource in data: if datasource['name'] == name: return datasource return None def create_datasource(orgname=None, profile='grafana', **kwargs): ''' Create a new datasource in an organisation. name Name of the data source. type Type of the datasource ('graphite', 'influxdb' etc.). access Use proxy or direct. url The URL to the data source API. user Optional - user to authenticate with the data source. password Optional - password to authenticate with the data source. database Optional - database to use with the data source. basicAuth Optional - set to True to use HTTP basic auth to authenticate with the data source. basicAuthUser Optional - HTTP basic auth username. basicAuthPassword Optional - HTTP basic auth password. jsonData Optional - additional json data to post (eg. "timeInterval"). isDefault Optional - set data source as default. withCredentials Optional - Whether credentials such as cookies or auth headers should be sent with cross-site requests. typeLogoUrl Optional - Logo to use for this datasource. orgname Name of the organization in which the data source should be created. 
profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.create_datasource ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.post( '{0}/api/datasources'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_datasource(datasourceid, orgname=None, profile='grafana', **kwargs): ''' Update a datasource. datasourceid Id of the datasource. name Name of the data source. type Type of the datasource ('graphite', 'influxdb' etc.). access Use proxy or direct. url The URL to the data source API. user Optional - user to authenticate with the data source. password Optional - password to authenticate with the data source. database Optional - database to use with the data source. basicAuth Optional - set to True to use HTTP basic auth to authenticate with the data source. basicAuthUser Optional - HTTP basic auth username. basicAuthPassword Optional - HTTP basic auth password. jsonData Optional - additional json data to post (eg. "timeInterval"). isDefault Optional - set data source as default. withCredentials Optional - Whether credentials such as cookies or auth headers should be sent with cross-site requests. typeLogoUrl Optional - Logo to use for this datasource. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.update_datasource <datasourceid> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/datasources/{1}'.format(profile['grafana_url'], datasourceid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() # temporary fix for https://github.com/grafana/grafana/issues/6869 #return response.json() return {} def delete_datasource(datasourceid, orgname=None, profile='grafana'): ''' Delete a datasource. datasourceid Id of the datasource. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.delete_datasource <datasource_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.delete( '{0}/api/datasources/{1}'.format(profile['grafana_url'], datasourceid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_dashboard(slug, orgname=None, profile='grafana'): ''' Get a dashboard. slug Slug (name) of the dashboard. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.get_dashboard <slug> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/dashboards/db/{1}'.format(profile['grafana_url'], slug), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) data = response.json() if response.status_code == 404: return None if response.status_code >= 400: response.raise_for_status() return data.get('dashboard') def delete_dashboard(slug, orgname=None, profile='grafana'): ''' Delete a dashboard. slug Slug (name) of the dashboard. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.delete_dashboard <slug> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.delete( '{0}/api/dashboards/db/{1}'.format(profile['grafana_url'], slug), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def create_update_dashboard(orgname=None, profile='grafana', **kwargs): ''' Create or update a dashboard. dashboard A dict that defines the dashboard to create/update. overwrite Whether the dashboard should be overwritten if already existing. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.create_update_dashboard dashboard=<dashboard> overwrite=True orgname=<orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.post( "{0}/api/dashboards/db".format(profile.get('grafana_url')), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json()
saltstack/salt
salt/modules/grafana4.py
update_org_user
python
def update_org_user(userid, orgname=None, profile='grafana', **kwargs): ''' Update user role in the organization. userid Id of the user. loginOrEmail Login or email of the user. role Role of the user for this organization. Should be one of: - Admin - Editor - Read Only Editor - Viewer orgname Name of the organization in which users are updated. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.update_org_user <user_id> <orgname> loginOrEmail=<loginOrEmail> role=<role> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.patch( '{0}/api/org/users/{1}'.format(profile['grafana_url'], userid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json()
Update user role in the organization. userid Id of the user. loginOrEmail Login or email of the user. role Role of the user for this organization. Should be one of: - Admin - Editor - Read Only Editor - Viewer orgname Name of the organization in which users are updated. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.update_org_user <user_id> <orgname> loginOrEmail=<loginOrEmail> role=<role>
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/grafana4.py#L565-L608
[ "def _get_headers(profile):\n headers = {'Content-type': 'application/json'}\n if profile.get('grafana_token', False):\n headers['Authorization'] = 'Bearer {0}'.format(\n profile['grafana_token'])\n return headers\n", "def _get_auth(profile):\n if profile.get('grafana_token', False):\n return None\n return requests.auth.HTTPBasicAuth(\n profile['grafana_user'],\n profile['grafana_password']\n )\n", "def switch_org(orgname, profile='grafana'):\n '''\n Switch the current organization.\n\n name\n Name of the organization to switch to.\n\n profile\n Configuration profile used to connect to the Grafana instance.\n Default is 'grafana'.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' grafana4.switch_org <name>\n '''\n if isinstance(profile, string_types):\n profile = __salt__['config.option'](profile)\n org = get_org(orgname, profile)\n response = requests.post(\n '{0}/api/user/using/{1}'.format(profile['grafana_url'], org['id']),\n auth=_get_auth(profile),\n headers=_get_headers(profile),\n timeout=profile.get('grafana_timeout', 3),\n )\n if response.status_code >= 400:\n response.raise_for_status()\n return org\n" ]
# -*- coding: utf-8 -*- ''' Module for working with the Grafana v4 API .. versionadded:: 2017.7.0 :depends: requests :configuration: This module requires a configuration profile to be configured in the minion config, minion pillar, or master config. The module will use the 'grafana' key by default, if defined. For example: .. code-block:: yaml grafana: grafana_url: http://grafana.localhost grafana_user: admin grafana_password: admin grafana_timeout: 3 ''' from __future__ import absolute_import, print_function, unicode_literals try: import requests HAS_LIBS = True except ImportError: HAS_LIBS = False from salt.ext.six import string_types __virtualname__ = 'grafana4' def __virtual__(): ''' Only load if requests is installed ''' if HAS_LIBS: return __virtualname__ else: return False, 'The "{0}" module could not be loaded: ' \ '"requests" is not installed.'.format(__virtualname__) def _get_headers(profile): headers = {'Content-type': 'application/json'} if profile.get('grafana_token', False): headers['Authorization'] = 'Bearer {0}'.format( profile['grafana_token']) return headers def _get_auth(profile): if profile.get('grafana_token', False): return None return requests.auth.HTTPBasicAuth( profile['grafana_user'], profile['grafana_password'] ) def get_users(profile='grafana'): ''' List all users. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_users ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/users'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_user(login, profile='grafana'): ''' Show a single user. login Login of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. 
CLI Example: .. code-block:: bash salt '*' grafana4.get_user <login> ''' data = get_users(profile) for user in data: if user['login'] == login: return user return None def get_user_data(userid, profile='grafana'): ''' Get user data. userid Id of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_user_data <user_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/users/{1}'.format(profile['grafana_url'], userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def create_user(profile='grafana', **kwargs): ''' Create a new user. login Login of the new user. password Password of the new user. email Email of the new user. name Optional - Full name of the new user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.create_user login=<login> password=<password> email=<email> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.post( '{0}/api/admin/users'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_user(userid, profile='grafana', orgid=None, **kwargs): ''' Update an existing user. userid Id of the user. login Optional - Login of the user. email Optional - Email of the user. name Optional - Full name of the user. orgid Optional - Default Organization of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.update_user <user_id> login=<login> email=<email> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/users/{1}'.format(profile['grafana_url'], userid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() if orgid: response2 = requests.post( '{0}/api/users/{1}/using/{2}'.format(profile['grafana_url'], userid, orgid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response2.status_code >= 400: response2.raise_for_status() return response.json() def update_user_password(userid, profile='grafana', **kwargs): ''' Update a user password. userid Id of the user. password New password of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.update_user_password <user_id> password=<password> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/admin/users/{1}/password'.format( profile['grafana_url'], userid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_user_permissions(userid, profile='grafana', **kwargs): ''' Update a user password. userid Id of the user. isGrafanaAdmin Whether user is a Grafana admin. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.update_user_permissions <user_id> isGrafanaAdmin=<true|false> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/admin/users/{1}/permissions'.format( profile['grafana_url'], userid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def delete_user(userid, profile='grafana'): ''' Delete a user. userid Id of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.delete_user <user_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.delete( '{0}/api/admin/users/{1}'.format(profile['grafana_url'], userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_user_orgs(userid, profile='grafana'): ''' Get the list of organisations a user belong to. userid Id of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_user_orgs <user_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/users/{1}/orgs'.format(profile['grafana_url'], userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def delete_user_org(userid, orgid, profile='grafana'): ''' Remove a user from an organization. userid Id of the user. orgid Id of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. 
CLI Example: .. code-block:: bash salt '*' grafana4.delete_user_org <user_id> <org_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.delete( '{0}/api/orgs/{1}/users/{2}'.format( profile['grafana_url'], orgid, userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_orgs(profile='grafana'): ''' List all organizations. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_orgs ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/orgs'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_org(name, profile='grafana'): ''' Show a single organization. name Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_org <name> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/orgs/name/{1}'.format(profile['grafana_url'], name), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def switch_org(orgname, profile='grafana'): ''' Switch the current organization. name Name of the organization to switch to. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.switch_org <name> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) org = get_org(orgname, profile) response = requests.post( '{0}/api/user/using/{1}'.format(profile['grafana_url'], org['id']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return org def get_org_users(orgname=None, profile='grafana'): ''' Get the list of users that belong to the organization. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_org_users <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/org/users'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def create_org_user(orgname=None, profile='grafana', **kwargs): ''' Add user to the organization. loginOrEmail Login or email of the user. role Role of the user for this organization. Should be one of: - Admin - Editor - Read Only Editor - Viewer orgname Name of the organization in which users are added. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.create_org_user <orgname> loginOrEmail=<loginOrEmail> role=<role> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.post( '{0}/api/org/users'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def delete_org_user(userid, orgname=None, profile='grafana'): ''' Remove user from the organization. userid Id of the user. orgname Name of the organization in which users are updated. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.delete_org_user <user_id> <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.delete( '{0}/api/org/users/{1}'.format(profile['grafana_url'], userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_org_address(orgname=None, profile='grafana'): ''' Get the organization address. orgname Name of the organization in which users are updated. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.get_org_address <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/org/address'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_org_address(orgname=None, profile='grafana', **kwargs): ''' Update the organization address. orgname Name of the organization in which users are updated. address1 Optional - address1 of the org. address2 Optional - address2 of the org. city Optional - city of the org. zip_code Optional - zip_code of the org. state Optional - state of the org. country Optional - country of the org. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.update_org_address <orgname> country=<country> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.put( '{0}/api/org/address'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_org_prefs(orgname=None, profile='grafana'): ''' Get the organization preferences. orgname Name of the organization in which users are updated. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.get_org_prefs <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/org/preferences'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_org_prefs(orgname=None, profile='grafana', **kwargs): ''' Update the organization preferences. orgname Name of the organization in which users are updated. theme Selected theme for the org. homeDashboardId Home dashboard for the org. timezone Timezone for the org (one of: "browser", "utc", or ""). profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.update_org_prefs <orgname> theme=<theme> timezone=<timezone> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.put( '{0}/api/org/preferences'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def create_org(profile='grafana', **kwargs): ''' Create a new organization. name Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.create_org <name> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.post( '{0}/api/orgs'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_org(orgid, profile='grafana', **kwargs): ''' Update an existing organization. orgid Id of the organization. name New name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.update_org <org_id> name=<name> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/orgs/{1}'.format(profile['grafana_url'], orgid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def delete_org(orgid, profile='grafana'): ''' Delete an organization. orgid Id of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.delete_org <org_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.delete( '{0}/api/orgs/{1}'.format(profile['grafana_url'], orgid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_datasources(orgname=None, profile='grafana'): ''' List all datasources in an organisation. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.get_datasources <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/datasources'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_datasource(name, orgname=None, profile='grafana'): ''' Show a single datasource in an organisation. name Name of the datasource. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_datasource <name> <orgname> ''' data = get_datasources(orgname=orgname, profile=profile) for datasource in data: if datasource['name'] == name: return datasource return None def create_datasource(orgname=None, profile='grafana', **kwargs): ''' Create a new datasource in an organisation. name Name of the data source. type Type of the datasource ('graphite', 'influxdb' etc.). access Use proxy or direct. url The URL to the data source API. user Optional - user to authenticate with the data source. password Optional - password to authenticate with the data source. database Optional - database to use with the data source. basicAuth Optional - set to True to use HTTP basic auth to authenticate with the data source. basicAuthUser Optional - HTTP basic auth username. basicAuthPassword Optional - HTTP basic auth password. jsonData Optional - additional json data to post (eg. "timeInterval"). isDefault Optional - set data source as default. withCredentials Optional - Whether credentials such as cookies or auth headers should be sent with cross-site requests. typeLogoUrl Optional - Logo to use for this datasource. orgname Name of the organization in which the data source should be created. 
profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.create_datasource ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.post( '{0}/api/datasources'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_datasource(datasourceid, orgname=None, profile='grafana', **kwargs): ''' Update a datasource. datasourceid Id of the datasource. name Name of the data source. type Type of the datasource ('graphite', 'influxdb' etc.). access Use proxy or direct. url The URL to the data source API. user Optional - user to authenticate with the data source. password Optional - password to authenticate with the data source. database Optional - database to use with the data source. basicAuth Optional - set to True to use HTTP basic auth to authenticate with the data source. basicAuthUser Optional - HTTP basic auth username. basicAuthPassword Optional - HTTP basic auth password. jsonData Optional - additional json data to post (eg. "timeInterval"). isDefault Optional - set data source as default. withCredentials Optional - Whether credentials such as cookies or auth headers should be sent with cross-site requests. typeLogoUrl Optional - Logo to use for this datasource. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.update_datasource <datasourceid> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/datasources/{1}'.format(profile['grafana_url'], datasourceid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() # temporary fix for https://github.com/grafana/grafana/issues/6869 #return response.json() return {} def delete_datasource(datasourceid, orgname=None, profile='grafana'): ''' Delete a datasource. datasourceid Id of the datasource. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.delete_datasource <datasource_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.delete( '{0}/api/datasources/{1}'.format(profile['grafana_url'], datasourceid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_dashboard(slug, orgname=None, profile='grafana'): ''' Get a dashboard. slug Slug (name) of the dashboard. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.get_dashboard <slug> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/dashboards/db/{1}'.format(profile['grafana_url'], slug), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) data = response.json() if response.status_code == 404: return None if response.status_code >= 400: response.raise_for_status() return data.get('dashboard') def delete_dashboard(slug, orgname=None, profile='grafana'): ''' Delete a dashboard. slug Slug (name) of the dashboard. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.delete_dashboard <slug> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.delete( '{0}/api/dashboards/db/{1}'.format(profile['grafana_url'], slug), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def create_update_dashboard(orgname=None, profile='grafana', **kwargs): ''' Create or update a dashboard. dashboard A dict that defines the dashboard to create/update. overwrite Whether the dashboard should be overwritten if already existing. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.create_update_dashboard dashboard=<dashboard> overwrite=True orgname=<orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.post( "{0}/api/dashboards/db".format(profile.get('grafana_url')), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json()
saltstack/salt
salt/modules/grafana4.py
get_datasource
python
def get_datasource(name, orgname=None, profile='grafana'): ''' Show a single datasource in an organisation. name Name of the datasource. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_datasource <name> <orgname> ''' data = get_datasources(orgname=orgname, profile=profile) for datasource in data: if datasource['name'] == name: return datasource return None
Show a single datasource in an organisation. name Name of the datasource. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_datasource <name> <orgname>
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/grafana4.py#L930-L954
[ "def get_datasources(orgname=None, profile='grafana'):\n '''\n List all datasources in an organisation.\n\n orgname\n Name of the organization.\n\n profile\n Configuration profile used to connect to the Grafana instance.\n Default is 'grafana'.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' grafana4.get_datasources <orgname>\n '''\n if isinstance(profile, string_types):\n profile = __salt__['config.option'](profile)\n if orgname:\n switch_org(orgname, profile)\n response = requests.get(\n '{0}/api/datasources'.format(profile['grafana_url']),\n auth=_get_auth(profile),\n headers=_get_headers(profile),\n timeout=profile.get('grafana_timeout', 3),\n )\n if response.status_code >= 400:\n response.raise_for_status()\n return response.json()\n" ]
# -*- coding: utf-8 -*- ''' Module for working with the Grafana v4 API .. versionadded:: 2017.7.0 :depends: requests :configuration: This module requires a configuration profile to be configured in the minion config, minion pillar, or master config. The module will use the 'grafana' key by default, if defined. For example: .. code-block:: yaml grafana: grafana_url: http://grafana.localhost grafana_user: admin grafana_password: admin grafana_timeout: 3 ''' from __future__ import absolute_import, print_function, unicode_literals try: import requests HAS_LIBS = True except ImportError: HAS_LIBS = False from salt.ext.six import string_types __virtualname__ = 'grafana4' def __virtual__(): ''' Only load if requests is installed ''' if HAS_LIBS: return __virtualname__ else: return False, 'The "{0}" module could not be loaded: ' \ '"requests" is not installed.'.format(__virtualname__) def _get_headers(profile): headers = {'Content-type': 'application/json'} if profile.get('grafana_token', False): headers['Authorization'] = 'Bearer {0}'.format( profile['grafana_token']) return headers def _get_auth(profile): if profile.get('grafana_token', False): return None return requests.auth.HTTPBasicAuth( profile['grafana_user'], profile['grafana_password'] ) def get_users(profile='grafana'): ''' List all users. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_users ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/users'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_user(login, profile='grafana'): ''' Show a single user. login Login of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. 
CLI Example: .. code-block:: bash salt '*' grafana4.get_user <login> ''' data = get_users(profile) for user in data: if user['login'] == login: return user return None def get_user_data(userid, profile='grafana'): ''' Get user data. userid Id of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_user_data <user_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/users/{1}'.format(profile['grafana_url'], userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def create_user(profile='grafana', **kwargs): ''' Create a new user. login Login of the new user. password Password of the new user. email Email of the new user. name Optional - Full name of the new user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.create_user login=<login> password=<password> email=<email> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.post( '{0}/api/admin/users'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_user(userid, profile='grafana', orgid=None, **kwargs): ''' Update an existing user. userid Id of the user. login Optional - Login of the user. email Optional - Email of the user. name Optional - Full name of the user. orgid Optional - Default Organization of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.update_user <user_id> login=<login> email=<email> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/users/{1}'.format(profile['grafana_url'], userid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() if orgid: response2 = requests.post( '{0}/api/users/{1}/using/{2}'.format(profile['grafana_url'], userid, orgid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response2.status_code >= 400: response2.raise_for_status() return response.json() def update_user_password(userid, profile='grafana', **kwargs): ''' Update a user password. userid Id of the user. password New password of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.update_user_password <user_id> password=<password> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/admin/users/{1}/password'.format( profile['grafana_url'], userid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_user_permissions(userid, profile='grafana', **kwargs): ''' Update a user password. userid Id of the user. isGrafanaAdmin Whether user is a Grafana admin. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.update_user_permissions <user_id> isGrafanaAdmin=<true|false> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/admin/users/{1}/permissions'.format( profile['grafana_url'], userid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def delete_user(userid, profile='grafana'): ''' Delete a user. userid Id of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.delete_user <user_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.delete( '{0}/api/admin/users/{1}'.format(profile['grafana_url'], userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_user_orgs(userid, profile='grafana'): ''' Get the list of organisations a user belong to. userid Id of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_user_orgs <user_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/users/{1}/orgs'.format(profile['grafana_url'], userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def delete_user_org(userid, orgid, profile='grafana'): ''' Remove a user from an organization. userid Id of the user. orgid Id of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. 
CLI Example: .. code-block:: bash salt '*' grafana4.delete_user_org <user_id> <org_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.delete( '{0}/api/orgs/{1}/users/{2}'.format( profile['grafana_url'], orgid, userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_orgs(profile='grafana'): ''' List all organizations. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_orgs ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/orgs'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_org(name, profile='grafana'): ''' Show a single organization. name Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_org <name> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/orgs/name/{1}'.format(profile['grafana_url'], name), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def switch_org(orgname, profile='grafana'): ''' Switch the current organization. name Name of the organization to switch to. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.switch_org <name> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) org = get_org(orgname, profile) response = requests.post( '{0}/api/user/using/{1}'.format(profile['grafana_url'], org['id']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return org def get_org_users(orgname=None, profile='grafana'): ''' Get the list of users that belong to the organization. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_org_users <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/org/users'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def create_org_user(orgname=None, profile='grafana', **kwargs): ''' Add user to the organization. loginOrEmail Login or email of the user. role Role of the user for this organization. Should be one of: - Admin - Editor - Read Only Editor - Viewer orgname Name of the organization in which users are added. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.create_org_user <orgname> loginOrEmail=<loginOrEmail> role=<role> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.post( '{0}/api/org/users'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_org_user(userid, orgname=None, profile='grafana', **kwargs): ''' Update user role in the organization. userid Id of the user. loginOrEmail Login or email of the user. role Role of the user for this organization. Should be one of: - Admin - Editor - Read Only Editor - Viewer orgname Name of the organization in which users are updated. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.update_org_user <user_id> <orgname> loginOrEmail=<loginOrEmail> role=<role> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.patch( '{0}/api/org/users/{1}'.format(profile['grafana_url'], userid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def delete_org_user(userid, orgname=None, profile='grafana'): ''' Remove user from the organization. userid Id of the user. orgname Name of the organization in which users are updated. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.delete_org_user <user_id> <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.delete( '{0}/api/org/users/{1}'.format(profile['grafana_url'], userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_org_address(orgname=None, profile='grafana'): ''' Get the organization address. orgname Name of the organization in which users are updated. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_org_address <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/org/address'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_org_address(orgname=None, profile='grafana', **kwargs): ''' Update the organization address. orgname Name of the organization in which users are updated. address1 Optional - address1 of the org. address2 Optional - address2 of the org. city Optional - city of the org. zip_code Optional - zip_code of the org. state Optional - state of the org. country Optional - country of the org. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.update_org_address <orgname> country=<country> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.put( '{0}/api/org/address'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_org_prefs(orgname=None, profile='grafana'): ''' Get the organization preferences. orgname Name of the organization in which users are updated. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_org_prefs <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/org/preferences'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_org_prefs(orgname=None, profile='grafana', **kwargs): ''' Update the organization preferences. orgname Name of the organization in which users are updated. theme Selected theme for the org. homeDashboardId Home dashboard for the org. timezone Timezone for the org (one of: "browser", "utc", or ""). profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.update_org_prefs <orgname> theme=<theme> timezone=<timezone> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.put( '{0}/api/org/preferences'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def create_org(profile='grafana', **kwargs): ''' Create a new organization. name Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.create_org <name> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.post( '{0}/api/orgs'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_org(orgid, profile='grafana', **kwargs): ''' Update an existing organization. orgid Id of the organization. name New name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.update_org <org_id> name=<name> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/orgs/{1}'.format(profile['grafana_url'], orgid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def delete_org(orgid, profile='grafana'): ''' Delete an organization. orgid Id of the organization. 
profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.delete_org <org_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.delete( '{0}/api/orgs/{1}'.format(profile['grafana_url'], orgid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_datasources(orgname=None, profile='grafana'): ''' List all datasources in an organisation. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_datasources <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/datasources'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def create_datasource(orgname=None, profile='grafana', **kwargs): ''' Create a new datasource in an organisation. name Name of the data source. type Type of the datasource ('graphite', 'influxdb' etc.). access Use proxy or direct. url The URL to the data source API. user Optional - user to authenticate with the data source. password Optional - password to authenticate with the data source. database Optional - database to use with the data source. basicAuth Optional - set to True to use HTTP basic auth to authenticate with the data source. basicAuthUser Optional - HTTP basic auth username. basicAuthPassword Optional - HTTP basic auth password. jsonData Optional - additional json data to post (eg. "timeInterval"). isDefault Optional - set data source as default. 
withCredentials Optional - Whether credentials such as cookies or auth headers should be sent with cross-site requests. typeLogoUrl Optional - Logo to use for this datasource. orgname Name of the organization in which the data source should be created. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.create_datasource ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.post( '{0}/api/datasources'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_datasource(datasourceid, orgname=None, profile='grafana', **kwargs): ''' Update a datasource. datasourceid Id of the datasource. name Name of the data source. type Type of the datasource ('graphite', 'influxdb' etc.). access Use proxy or direct. url The URL to the data source API. user Optional - user to authenticate with the data source. password Optional - password to authenticate with the data source. database Optional - database to use with the data source. basicAuth Optional - set to True to use HTTP basic auth to authenticate with the data source. basicAuthUser Optional - HTTP basic auth username. basicAuthPassword Optional - HTTP basic auth password. jsonData Optional - additional json data to post (eg. "timeInterval"). isDefault Optional - set data source as default. withCredentials Optional - Whether credentials such as cookies or auth headers should be sent with cross-site requests. typeLogoUrl Optional - Logo to use for this datasource. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.update_datasource <datasourceid> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/datasources/{1}'.format(profile['grafana_url'], datasourceid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() # temporary fix for https://github.com/grafana/grafana/issues/6869 #return response.json() return {} def delete_datasource(datasourceid, orgname=None, profile='grafana'): ''' Delete a datasource. datasourceid Id of the datasource. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.delete_datasource <datasource_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.delete( '{0}/api/datasources/{1}'.format(profile['grafana_url'], datasourceid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_dashboard(slug, orgname=None, profile='grafana'): ''' Get a dashboard. slug Slug (name) of the dashboard. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.get_dashboard <slug> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/dashboards/db/{1}'.format(profile['grafana_url'], slug), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) data = response.json() if response.status_code == 404: return None if response.status_code >= 400: response.raise_for_status() return data.get('dashboard') def delete_dashboard(slug, orgname=None, profile='grafana'): ''' Delete a dashboard. slug Slug (name) of the dashboard. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.delete_dashboard <slug> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.delete( '{0}/api/dashboards/db/{1}'.format(profile['grafana_url'], slug), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def create_update_dashboard(orgname=None, profile='grafana', **kwargs): ''' Create or update a dashboard. dashboard A dict that defines the dashboard to create/update. overwrite Whether the dashboard should be overwritten if already existing. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.create_update_dashboard dashboard=<dashboard> overwrite=True orgname=<orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.post( "{0}/api/dashboards/db".format(profile.get('grafana_url')), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json()
saltstack/salt
salt/modules/grafana4.py
delete_datasource
python
def delete_datasource(datasourceid, orgname=None, profile='grafana'):
    '''
    Delete a datasource.

    datasourceid
        Id of the datasource.

    orgname
        Name of the organization from which the datasource is deleted.

    profile
        Configuration profile used to connect to the Grafana instance.
        Default is 'grafana'.

    CLI Example:

    .. code-block:: bash

        salt '*' grafana4.delete_datasource <datasource_id>
    '''
    # Accept either a profile name (looked up in config) or an inline dict.
    if isinstance(profile, string_types):
        profile = __salt__['config.option'](profile)
    # Datasource ids are scoped to the active organization; switch first so
    # the delete targets the requested org (previously ``orgname`` was
    # accepted but silently ignored, matching no sibling function).
    if orgname:
        switch_org(orgname, profile)
    response = requests.delete(
        '{0}/api/datasources/{1}'.format(profile['grafana_url'], datasourceid),
        auth=_get_auth(profile),
        headers=_get_headers(profile),
        timeout=profile.get('grafana_timeout', 3),
    )
    # Grafana signals failure via HTTP status; surface it as an exception.
    if response.status_code >= 400:
        response.raise_for_status()
    return response.json()
Delete a datasource. datasourceid Id of the datasource. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.delete_datasource <datasource_id>
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/grafana4.py#L1113-L1140
[ "def _get_headers(profile):\n headers = {'Content-type': 'application/json'}\n if profile.get('grafana_token', False):\n headers['Authorization'] = 'Bearer {0}'.format(\n profile['grafana_token'])\n return headers\n", "def _get_auth(profile):\n if profile.get('grafana_token', False):\n return None\n return requests.auth.HTTPBasicAuth(\n profile['grafana_user'],\n profile['grafana_password']\n )\n" ]
# -*- coding: utf-8 -*- ''' Module for working with the Grafana v4 API .. versionadded:: 2017.7.0 :depends: requests :configuration: This module requires a configuration profile to be configured in the minion config, minion pillar, or master config. The module will use the 'grafana' key by default, if defined. For example: .. code-block:: yaml grafana: grafana_url: http://grafana.localhost grafana_user: admin grafana_password: admin grafana_timeout: 3 ''' from __future__ import absolute_import, print_function, unicode_literals try: import requests HAS_LIBS = True except ImportError: HAS_LIBS = False from salt.ext.six import string_types __virtualname__ = 'grafana4' def __virtual__(): ''' Only load if requests is installed ''' if HAS_LIBS: return __virtualname__ else: return False, 'The "{0}" module could not be loaded: ' \ '"requests" is not installed.'.format(__virtualname__) def _get_headers(profile): headers = {'Content-type': 'application/json'} if profile.get('grafana_token', False): headers['Authorization'] = 'Bearer {0}'.format( profile['grafana_token']) return headers def _get_auth(profile): if profile.get('grafana_token', False): return None return requests.auth.HTTPBasicAuth( profile['grafana_user'], profile['grafana_password'] ) def get_users(profile='grafana'): ''' List all users. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_users ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/users'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_user(login, profile='grafana'): ''' Show a single user. login Login of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. 
CLI Example: .. code-block:: bash salt '*' grafana4.get_user <login> ''' data = get_users(profile) for user in data: if user['login'] == login: return user return None def get_user_data(userid, profile='grafana'): ''' Get user data. userid Id of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_user_data <user_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/users/{1}'.format(profile['grafana_url'], userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def create_user(profile='grafana', **kwargs): ''' Create a new user. login Login of the new user. password Password of the new user. email Email of the new user. name Optional - Full name of the new user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.create_user login=<login> password=<password> email=<email> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.post( '{0}/api/admin/users'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_user(userid, profile='grafana', orgid=None, **kwargs): ''' Update an existing user. userid Id of the user. login Optional - Login of the user. email Optional - Email of the user. name Optional - Full name of the user. orgid Optional - Default Organization of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.update_user <user_id> login=<login> email=<email> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/users/{1}'.format(profile['grafana_url'], userid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() if orgid: response2 = requests.post( '{0}/api/users/{1}/using/{2}'.format(profile['grafana_url'], userid, orgid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response2.status_code >= 400: response2.raise_for_status() return response.json() def update_user_password(userid, profile='grafana', **kwargs): ''' Update a user password. userid Id of the user. password New password of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.update_user_password <user_id> password=<password> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/admin/users/{1}/password'.format( profile['grafana_url'], userid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_user_permissions(userid, profile='grafana', **kwargs): ''' Update a user password. userid Id of the user. isGrafanaAdmin Whether user is a Grafana admin. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.update_user_permissions <user_id> isGrafanaAdmin=<true|false> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/admin/users/{1}/permissions'.format( profile['grafana_url'], userid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def delete_user(userid, profile='grafana'): ''' Delete a user. userid Id of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.delete_user <user_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.delete( '{0}/api/admin/users/{1}'.format(profile['grafana_url'], userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_user_orgs(userid, profile='grafana'): ''' Get the list of organisations a user belong to. userid Id of the user. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_user_orgs <user_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/users/{1}/orgs'.format(profile['grafana_url'], userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def delete_user_org(userid, orgid, profile='grafana'): ''' Remove a user from an organization. userid Id of the user. orgid Id of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. 
CLI Example: .. code-block:: bash salt '*' grafana4.delete_user_org <user_id> <org_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.delete( '{0}/api/orgs/{1}/users/{2}'.format( profile['grafana_url'], orgid, userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_orgs(profile='grafana'): ''' List all organizations. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_orgs ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/orgs'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_org(name, profile='grafana'): ''' Show a single organization. name Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_org <name> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.get( '{0}/api/orgs/name/{1}'.format(profile['grafana_url'], name), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def switch_org(orgname, profile='grafana'): ''' Switch the current organization. name Name of the organization to switch to. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.switch_org <name> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) org = get_org(orgname, profile) response = requests.post( '{0}/api/user/using/{1}'.format(profile['grafana_url'], org['id']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return org def get_org_users(orgname=None, profile='grafana'): ''' Get the list of users that belong to the organization. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_org_users <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/org/users'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def create_org_user(orgname=None, profile='grafana', **kwargs): ''' Add user to the organization. loginOrEmail Login or email of the user. role Role of the user for this organization. Should be one of: - Admin - Editor - Read Only Editor - Viewer orgname Name of the organization in which users are added. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.create_org_user <orgname> loginOrEmail=<loginOrEmail> role=<role> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.post( '{0}/api/org/users'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_org_user(userid, orgname=None, profile='grafana', **kwargs): ''' Update user role in the organization. userid Id of the user. loginOrEmail Login or email of the user. role Role of the user for this organization. Should be one of: - Admin - Editor - Read Only Editor - Viewer orgname Name of the organization in which users are updated. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.update_org_user <user_id> <orgname> loginOrEmail=<loginOrEmail> role=<role> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.patch( '{0}/api/org/users/{1}'.format(profile['grafana_url'], userid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def delete_org_user(userid, orgname=None, profile='grafana'): ''' Remove user from the organization. userid Id of the user. orgname Name of the organization in which users are updated. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.delete_org_user <user_id> <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.delete( '{0}/api/org/users/{1}'.format(profile['grafana_url'], userid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_org_address(orgname=None, profile='grafana'): ''' Get the organization address. orgname Name of the organization in which users are updated. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_org_address <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/org/address'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_org_address(orgname=None, profile='grafana', **kwargs): ''' Update the organization address. orgname Name of the organization in which users are updated. address1 Optional - address1 of the org. address2 Optional - address2 of the org. city Optional - city of the org. zip_code Optional - zip_code of the org. state Optional - state of the org. country Optional - country of the org. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.update_org_address <orgname> country=<country> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.put( '{0}/api/org/address'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_org_prefs(orgname=None, profile='grafana'): ''' Get the organization preferences. orgname Name of the organization in which users are updated. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_org_prefs <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/org/preferences'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_org_prefs(orgname=None, profile='grafana', **kwargs): ''' Update the organization preferences. orgname Name of the organization in which users are updated. theme Selected theme for the org. homeDashboardId Home dashboard for the org. timezone Timezone for the org (one of: "browser", "utc", or ""). profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. 
code-block:: bash salt '*' grafana4.update_org_prefs <orgname> theme=<theme> timezone=<timezone> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.put( '{0}/api/org/preferences'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def create_org(profile='grafana', **kwargs): ''' Create a new organization. name Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.create_org <name> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.post( '{0}/api/orgs'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_org(orgid, profile='grafana', **kwargs): ''' Update an existing organization. orgid Id of the organization. name New name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.update_org <org_id> name=<name> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/orgs/{1}'.format(profile['grafana_url'], orgid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def delete_org(orgid, profile='grafana'): ''' Delete an organization. orgid Id of the organization. 
profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.delete_org <org_id> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.delete( '{0}/api/orgs/{1}'.format(profile['grafana_url'], orgid), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_datasources(orgname=None, profile='grafana'): ''' List all datasources in an organisation. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_datasources <orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/datasources'.format(profile['grafana_url']), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def get_datasource(name, orgname=None, profile='grafana'): ''' Show a single datasource in an organisation. name Name of the datasource. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_datasource <name> <orgname> ''' data = get_datasources(orgname=orgname, profile=profile) for datasource in data: if datasource['name'] == name: return datasource return None def create_datasource(orgname=None, profile='grafana', **kwargs): ''' Create a new datasource in an organisation. name Name of the data source. type Type of the datasource ('graphite', 'influxdb' etc.). access Use proxy or direct. url The URL to the data source API. 
user Optional - user to authenticate with the data source. password Optional - password to authenticate with the data source. database Optional - database to use with the data source. basicAuth Optional - set to True to use HTTP basic auth to authenticate with the data source. basicAuthUser Optional - HTTP basic auth username. basicAuthPassword Optional - HTTP basic auth password. jsonData Optional - additional json data to post (eg. "timeInterval"). isDefault Optional - set data source as default. withCredentials Optional - Whether credentials such as cookies or auth headers should be sent with cross-site requests. typeLogoUrl Optional - Logo to use for this datasource. orgname Name of the organization in which the data source should be created. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.create_datasource ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.post( '{0}/api/datasources'.format(profile['grafana_url']), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def update_datasource(datasourceid, orgname=None, profile='grafana', **kwargs): ''' Update a datasource. datasourceid Id of the datasource. name Name of the data source. type Type of the datasource ('graphite', 'influxdb' etc.). access Use proxy or direct. url The URL to the data source API. user Optional - user to authenticate with the data source. password Optional - password to authenticate with the data source. database Optional - database to use with the data source. basicAuth Optional - set to True to use HTTP basic auth to authenticate with the data source. basicAuthUser Optional - HTTP basic auth username. 
basicAuthPassword Optional - HTTP basic auth password. jsonData Optional - additional json data to post (eg. "timeInterval"). isDefault Optional - set data source as default. withCredentials Optional - Whether credentials such as cookies or auth headers should be sent with cross-site requests. typeLogoUrl Optional - Logo to use for this datasource. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.update_datasource <datasourceid> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) response = requests.put( '{0}/api/datasources/{1}'.format(profile['grafana_url'], datasourceid), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() # temporary fix for https://github.com/grafana/grafana/issues/6869 #return response.json() return {} def get_dashboard(slug, orgname=None, profile='grafana'): ''' Get a dashboard. slug Slug (name) of the dashboard. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.get_dashboard <slug> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.get( '{0}/api/dashboards/db/{1}'.format(profile['grafana_url'], slug), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) data = response.json() if response.status_code == 404: return None if response.status_code >= 400: response.raise_for_status() return data.get('dashboard') def delete_dashboard(slug, orgname=None, profile='grafana'): ''' Delete a dashboard. slug Slug (name) of the dashboard. orgname Name of the organization. 
profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.delete_dashboard <slug> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.delete( '{0}/api/dashboards/db/{1}'.format(profile['grafana_url'], slug), auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json() def create_update_dashboard(orgname=None, profile='grafana', **kwargs): ''' Create or update a dashboard. dashboard A dict that defines the dashboard to create/update. overwrite Whether the dashboard should be overwritten if already existing. orgname Name of the organization. profile Configuration profile used to connect to the Grafana instance. Default is 'grafana'. CLI Example: .. code-block:: bash salt '*' grafana4.create_update_dashboard dashboard=<dashboard> overwrite=True orgname=<orgname> ''' if isinstance(profile, string_types): profile = __salt__['config.option'](profile) if orgname: switch_org(orgname, profile) response = requests.post( "{0}/api/dashboards/db".format(profile.get('grafana_url')), json=kwargs, auth=_get_auth(profile), headers=_get_headers(profile), timeout=profile.get('grafana_timeout', 3), ) if response.status_code >= 400: response.raise_for_status() return response.json()
saltstack/salt
salt/pillar/libvirt.py
ext_pillar
python
def ext_pillar(minion_id, pillar, # pylint: disable=W0613 command): # pylint: disable=W0613 ''' Read in the generated libvirt keys ''' key_dir = os.path.join( __opts__['pki_dir'], 'libvirt', minion_id) cacert = os.path.join(__opts__['pki_dir'], 'libvirt', 'cacert.pem') if not os.path.isdir(key_dir): # No keys have been generated gen_hyper_keys(minion_id, pillar.get('ext_pillar_virt.country', 'US'), pillar.get('ext_pillar_virt.st', 'Utah'), pillar.get('ext_pillar_virt.locality', 'Salt Lake City'), pillar.get('ext_pillar_virt.organization', 'Salted'), pillar.get('ext_pillar_virt.expiration_days', '365') ) ret = {} for key in os.listdir(key_dir): if not key.endswith('.pem'): continue fn_ = os.path.join(key_dir, key) with salt.utils.files.fopen(fn_, 'r') as fp_: ret['libvirt.{0}'.format(key)] = \ salt.utils.stringutils.to_unicode(fp_.read()) with salt.utils.files.fopen(cacert, 'r') as fp_: ret['libvirt.cacert.pem'] = \ salt.utils.stringutils.to_unicode(fp_.read()) return ret
Read in the generated libvirt keys
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/libvirt.py#L27-L61
[ "def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n", "def to_unicode(s, encoding=None, errors='strict', normalize=False):\n '''\n Given str or unicode, return unicode (str for python 3)\n '''\n def _normalize(s):\n return unicodedata.normalize('NFC', s) if normalize else s\n\n if encoding is None:\n # Try utf-8 first, and fall back to detected encoding\n encoding = ('utf-8', __salt_system_encoding__)\n if not isinstance(encoding, (tuple, list)):\n encoding = (encoding,)\n\n if not encoding:\n raise ValueError('encoding cannot be empty')\n\n exc = None\n if six.PY3:\n if isinstance(s, str):\n return _normalize(s)\n elif isinstance(s, (bytes, bytearray)):\n return _normalize(to_str(s, encoding, errors))\n raise TypeError('expected str, bytes, or bytearray')\n else:\n # This needs to be str and not six.string_types, since if the string is\n # already a unicode type, it does not need to be decoded (and doing so\n # will raise an exception).\n if isinstance(s, unicode): # pylint: disable=incompatible-py3-code,undefined-variable\n return _normalize(s)\n elif isinstance(s, (str, bytearray)):\n for enc in encoding:\n try:\n return _normalize(s.decode(enc, errors))\n except UnicodeDecodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str or bytearray')\n", "def gen_hyper_keys(minion_id,\n country='US',\n state='Utah',\n locality='Salt Lake City',\n organization='Salted',\n expiration_days='365'):\n '''\n Generate the keys to be used by libvirt hypervisors, this routine gens\n the keys and applies them to the 
pillar for the hypervisor minions\n '''\n key_dir = os.path.join(\n __opts__['pki_dir'],\n 'libvirt')\n if not os.path.isdir(key_dir):\n os.makedirs(key_dir)\n cakey = os.path.join(key_dir, 'cakey.pem')\n cacert = os.path.join(key_dir, 'cacert.pem')\n cainfo = os.path.join(key_dir, 'ca.info')\n if not os.path.isfile(cainfo):\n with salt.utils.files.fopen(cainfo, 'w+') as fp_:\n fp_.write('cn = salted\\nca\\ncert_signing_key')\n if not os.path.isfile(cakey):\n subprocess.call(\n 'certtool --generate-privkey > {0}'.format(cakey),\n shell=True)\n if not os.path.isfile(cacert):\n cmd = ('certtool --generate-self-signed --load-privkey {0} '\n '--template {1} --outfile {2}').format(cakey, cainfo, cacert)\n subprocess.call(cmd, shell=True)\n sub_dir = os.path.join(key_dir, minion_id)\n if not os.path.isdir(sub_dir):\n os.makedirs(sub_dir)\n priv = os.path.join(sub_dir, 'serverkey.pem')\n cert = os.path.join(sub_dir, 'servercert.pem')\n srvinfo = os.path.join(sub_dir, 'server.info')\n cpriv = os.path.join(sub_dir, 'clientkey.pem')\n ccert = os.path.join(sub_dir, 'clientcert.pem')\n clientinfo = os.path.join(sub_dir, 'client.info')\n if not os.path.isfile(srvinfo):\n with salt.utils.files.fopen(srvinfo, 'w+') as fp_:\n infodat = salt.utils.stringutils.to_str(\n 'organization = salted\\ncn = {0}\\ntls_www_server'\n '\\nencryption_key\\nsigning_key'\n '\\ndigitalSignature\\nexpiration_days = {1}'.format(\n __grains__['fqdn'], expiration_days\n )\n )\n fp_.write(infodat)\n if not os.path.isfile(priv):\n subprocess.call(\n 'certtool --generate-privkey > {0}'.format(priv),\n shell=True)\n if not os.path.isfile(cert):\n cmd = ('certtool --generate-certificate --load-privkey {0} '\n '--load-ca-certificate {1} --load-ca-privkey {2} '\n '--template {3} --outfile {4}'\n ).format(priv, cacert, cakey, srvinfo, cert)\n subprocess.call(cmd, shell=True)\n if not os.path.isfile(clientinfo):\n with salt.utils.files.fopen(clientinfo, 'w+') as fp_:\n infodat = salt.utils.stringutils.to_str(\n 
'country = {0}\\nstate = {1}\\nlocality = {2}\\n'\n 'organization = {3}\\ncn = {4}\\n'\n 'tls_www_client\\nencryption_key\\nsigning_key\\n'\n 'digitalSignature'.format(\n country,\n state,\n locality,\n organization,\n __grains__['fqdn']\n )\n )\n fp_.write(infodat)\n if not os.path.isfile(cpriv):\n subprocess.call(\n 'certtool --generate-privkey > {0}'.format(cpriv),\n shell=True)\n if not os.path.isfile(ccert):\n cmd = ('certtool --generate-certificate --load-privkey {0} '\n '--load-ca-certificate {1} --load-ca-privkey {2} '\n '--template {3} --outfile {4}'\n ).format(cpriv, cacert, cakey, clientinfo, ccert)\n subprocess.call(cmd, shell=True)\n" ]
# -*- coding: utf-8 -*- ''' Load up the libvirt keys into Pillar for a given minion if said keys have been generated using the libvirt key runner :depends: certtool ''' from __future__ import absolute_import, print_function, unicode_literals # Don't "fix" the above docstring to put it on two lines, as the sphinx # autosummary pulls only the first line for its description. # Import python libs import os import subprocess # Import salt libs import salt.utils.files import salt.utils.path import salt.utils.stringutils def __virtual__(): return salt.utils.path.which('certtool') is not None def gen_hyper_keys(minion_id, country='US', state='Utah', locality='Salt Lake City', organization='Salted', expiration_days='365'): ''' Generate the keys to be used by libvirt hypervisors, this routine gens the keys and applies them to the pillar for the hypervisor minions ''' key_dir = os.path.join( __opts__['pki_dir'], 'libvirt') if not os.path.isdir(key_dir): os.makedirs(key_dir) cakey = os.path.join(key_dir, 'cakey.pem') cacert = os.path.join(key_dir, 'cacert.pem') cainfo = os.path.join(key_dir, 'ca.info') if not os.path.isfile(cainfo): with salt.utils.files.fopen(cainfo, 'w+') as fp_: fp_.write('cn = salted\nca\ncert_signing_key') if not os.path.isfile(cakey): subprocess.call( 'certtool --generate-privkey > {0}'.format(cakey), shell=True) if not os.path.isfile(cacert): cmd = ('certtool --generate-self-signed --load-privkey {0} ' '--template {1} --outfile {2}').format(cakey, cainfo, cacert) subprocess.call(cmd, shell=True) sub_dir = os.path.join(key_dir, minion_id) if not os.path.isdir(sub_dir): os.makedirs(sub_dir) priv = os.path.join(sub_dir, 'serverkey.pem') cert = os.path.join(sub_dir, 'servercert.pem') srvinfo = os.path.join(sub_dir, 'server.info') cpriv = os.path.join(sub_dir, 'clientkey.pem') ccert = os.path.join(sub_dir, 'clientcert.pem') clientinfo = os.path.join(sub_dir, 'client.info') if not os.path.isfile(srvinfo): with salt.utils.files.fopen(srvinfo, 'w+') as fp_: 
infodat = salt.utils.stringutils.to_str( 'organization = salted\ncn = {0}\ntls_www_server' '\nencryption_key\nsigning_key' '\ndigitalSignature\nexpiration_days = {1}'.format( __grains__['fqdn'], expiration_days ) ) fp_.write(infodat) if not os.path.isfile(priv): subprocess.call( 'certtool --generate-privkey > {0}'.format(priv), shell=True) if not os.path.isfile(cert): cmd = ('certtool --generate-certificate --load-privkey {0} ' '--load-ca-certificate {1} --load-ca-privkey {2} ' '--template {3} --outfile {4}' ).format(priv, cacert, cakey, srvinfo, cert) subprocess.call(cmd, shell=True) if not os.path.isfile(clientinfo): with salt.utils.files.fopen(clientinfo, 'w+') as fp_: infodat = salt.utils.stringutils.to_str( 'country = {0}\nstate = {1}\nlocality = {2}\n' 'organization = {3}\ncn = {4}\n' 'tls_www_client\nencryption_key\nsigning_key\n' 'digitalSignature'.format( country, state, locality, organization, __grains__['fqdn'] ) ) fp_.write(infodat) if not os.path.isfile(cpriv): subprocess.call( 'certtool --generate-privkey > {0}'.format(cpriv), shell=True) if not os.path.isfile(ccert): cmd = ('certtool --generate-certificate --load-privkey {0} ' '--load-ca-certificate {1} --load-ca-privkey {2} ' '--template {3} --outfile {4}' ).format(cpriv, cacert, cakey, clientinfo, ccert) subprocess.call(cmd, shell=True)
saltstack/salt
salt/pillar/libvirt.py
gen_hyper_keys
python
def gen_hyper_keys(minion_id, country='US', state='Utah', locality='Salt Lake City', organization='Salted', expiration_days='365'): ''' Generate the keys to be used by libvirt hypervisors, this routine gens the keys and applies them to the pillar for the hypervisor minions ''' key_dir = os.path.join( __opts__['pki_dir'], 'libvirt') if not os.path.isdir(key_dir): os.makedirs(key_dir) cakey = os.path.join(key_dir, 'cakey.pem') cacert = os.path.join(key_dir, 'cacert.pem') cainfo = os.path.join(key_dir, 'ca.info') if not os.path.isfile(cainfo): with salt.utils.files.fopen(cainfo, 'w+') as fp_: fp_.write('cn = salted\nca\ncert_signing_key') if not os.path.isfile(cakey): subprocess.call( 'certtool --generate-privkey > {0}'.format(cakey), shell=True) if not os.path.isfile(cacert): cmd = ('certtool --generate-self-signed --load-privkey {0} ' '--template {1} --outfile {2}').format(cakey, cainfo, cacert) subprocess.call(cmd, shell=True) sub_dir = os.path.join(key_dir, minion_id) if not os.path.isdir(sub_dir): os.makedirs(sub_dir) priv = os.path.join(sub_dir, 'serverkey.pem') cert = os.path.join(sub_dir, 'servercert.pem') srvinfo = os.path.join(sub_dir, 'server.info') cpriv = os.path.join(sub_dir, 'clientkey.pem') ccert = os.path.join(sub_dir, 'clientcert.pem') clientinfo = os.path.join(sub_dir, 'client.info') if not os.path.isfile(srvinfo): with salt.utils.files.fopen(srvinfo, 'w+') as fp_: infodat = salt.utils.stringutils.to_str( 'organization = salted\ncn = {0}\ntls_www_server' '\nencryption_key\nsigning_key' '\ndigitalSignature\nexpiration_days = {1}'.format( __grains__['fqdn'], expiration_days ) ) fp_.write(infodat) if not os.path.isfile(priv): subprocess.call( 'certtool --generate-privkey > {0}'.format(priv), shell=True) if not os.path.isfile(cert): cmd = ('certtool --generate-certificate --load-privkey {0} ' '--load-ca-certificate {1} --load-ca-privkey {2} ' '--template {3} --outfile {4}' ).format(priv, cacert, cakey, srvinfo, cert) subprocess.call(cmd, shell=True) if 
not os.path.isfile(clientinfo): with salt.utils.files.fopen(clientinfo, 'w+') as fp_: infodat = salt.utils.stringutils.to_str( 'country = {0}\nstate = {1}\nlocality = {2}\n' 'organization = {3}\ncn = {4}\n' 'tls_www_client\nencryption_key\nsigning_key\n' 'digitalSignature'.format( country, state, locality, organization, __grains__['fqdn'] ) ) fp_.write(infodat) if not os.path.isfile(cpriv): subprocess.call( 'certtool --generate-privkey > {0}'.format(cpriv), shell=True) if not os.path.isfile(ccert): cmd = ('certtool --generate-certificate --load-privkey {0} ' '--load-ca-certificate {1} --load-ca-privkey {2} ' '--template {3} --outfile {4}' ).format(cpriv, cacert, cakey, clientinfo, ccert) subprocess.call(cmd, shell=True)
Generate the keys to be used by libvirt hypervisors, this routine gens the keys and applies them to the pillar for the hypervisor minions
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/pillar/libvirt.py#L64-L146
null
# -*- coding: utf-8 -*- ''' Load up the libvirt keys into Pillar for a given minion if said keys have been generated using the libvirt key runner :depends: certtool ''' from __future__ import absolute_import, print_function, unicode_literals # Don't "fix" the above docstring to put it on two lines, as the sphinx # autosummary pulls only the first line for its description. # Import python libs import os import subprocess # Import salt libs import salt.utils.files import salt.utils.path import salt.utils.stringutils def __virtual__(): return salt.utils.path.which('certtool') is not None def ext_pillar(minion_id, pillar, # pylint: disable=W0613 command): # pylint: disable=W0613 ''' Read in the generated libvirt keys ''' key_dir = os.path.join( __opts__['pki_dir'], 'libvirt', minion_id) cacert = os.path.join(__opts__['pki_dir'], 'libvirt', 'cacert.pem') if not os.path.isdir(key_dir): # No keys have been generated gen_hyper_keys(minion_id, pillar.get('ext_pillar_virt.country', 'US'), pillar.get('ext_pillar_virt.st', 'Utah'), pillar.get('ext_pillar_virt.locality', 'Salt Lake City'), pillar.get('ext_pillar_virt.organization', 'Salted'), pillar.get('ext_pillar_virt.expiration_days', '365') ) ret = {} for key in os.listdir(key_dir): if not key.endswith('.pem'): continue fn_ = os.path.join(key_dir, key) with salt.utils.files.fopen(fn_, 'r') as fp_: ret['libvirt.{0}'.format(key)] = \ salt.utils.stringutils.to_unicode(fp_.read()) with salt.utils.files.fopen(cacert, 'r') as fp_: ret['libvirt.cacert.pem'] = \ salt.utils.stringutils.to_unicode(fp_.read()) return ret
saltstack/salt
salt/states/boto_ec2.py
key_present
python
def key_present(name, save_private=None, upload_public=None, region=None, key=None, keyid=None, profile=None): ''' Ensure key pair is present. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } exists = __salt__['boto_ec2.get_key'](name, region, key, keyid, profile) log.debug('exists is %s', exists) if upload_public is not None and 'salt://' in upload_public: try: upload_public = __salt__['cp.get_file_str'](upload_public) except IOError as e: log.debug(e) ret['comment'] = 'File {0} not found.'.format(upload_public) ret['result'] = False return ret if not exists: if __opts__['test']: ret['comment'] = 'The key {0} is set to be created.'.format(name) ret['result'] = None return ret if save_private and not upload_public: created = __salt__['boto_ec2.create_key']( name, save_private, region, key, keyid, profile ) if created: ret['result'] = True ret['comment'] = 'The key {0} is created.'.format(name) ret['changes']['new'] = created else: ret['result'] = False ret['comment'] = 'Could not create key {0} '.format(name) elif not save_private and upload_public: imported = __salt__['boto_ec2.import_key'](name, upload_public, region, key, keyid, profile) if imported: ret['result'] = True ret['comment'] = 'The key {0} is created.'.format(name) ret['changes']['old'] = None ret['changes']['new'] = imported else: ret['result'] = False ret['comment'] = 'Could not create key {0} '.format(name) else: ret['result'] = False ret['comment'] = 'You can either upload or download a private key ' else: ret['result'] = True ret['comment'] = 'The key name {0} already exists'.format(name) return ret
Ensure key pair is present.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_ec2.py#L79-L133
null
# -*- coding: utf-8 -*- ''' Manage EC2 .. versionadded:: 2015.8.0 This module provides an interface to the Elastic Compute Cloud (EC2) service from AWS. The below code creates a key pair: .. code-block:: yaml create-key-pair: boto_ec2.key_present: - name: mykeypair - save_private: /root/ - region: eu-west-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs .. code-block:: yaml import-key-pair: boto_ec2.key_present: - name: mykeypair - upload_public: 'ssh-rsa AAAA' - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs You can also use salt:// in order to define the public key. .. code-block:: yaml import-key-pair: boto_ec2.key_present: - name: mykeypair - upload_public: salt://mybase/public_key.pub - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs The below code deletes a key pair: .. code-block:: yaml delete-key-pair: boto_ec2.key_absent: - name: mykeypair - region: eu-west-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging from time import time, sleep # Import salt libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin import salt.utils.data import salt.utils.dictupdate as dictupdate from salt.exceptions import SaltInvocationError, CommandExecutionError log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. 
''' if 'boto_ec2.get_key' in __salt__: return 'boto_ec2' else: return False def key_absent(name, region=None, key=None, keyid=None, profile=None): ''' Deletes a key pair ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } exists = __salt__['boto_ec2.get_key'](name, region, key, keyid, profile) if exists: if __opts__['test']: ret['comment'] = 'The key {0} is set to be deleted.'.format(name) ret['result'] = None return ret deleted = __salt__['boto_ec2.delete_key'](name, region, key, keyid, profile) log.debug('exists is %s', deleted) if deleted: ret['result'] = True ret['comment'] = 'The key {0} is deleted.'.format(name) ret['changes']['old'] = name else: ret['result'] = False ret['comment'] = 'Could not delete key {0} '.format(name) else: ret['result'] = True ret['comment'] = 'The key name {0} does not exist'.format(name) return ret def eni_present( name, subnet_id=None, subnet_name=None, private_ip_address=None, description=None, groups=None, source_dest_check=True, allocate_eip=None, arecords=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 ENI exists. .. versionadded:: 2016.3.0 name Name tag associated with the ENI. subnet_id The VPC subnet ID the ENI will exist within. subnet_name The VPC subnet name the ENI will exist within. private_ip_address The private ip address to use for this ENI. If this is not specified AWS will automatically assign a private IP address to the ENI. Must be specified at creation time; will be ignored afterward. description Description of the key. groups A list of security groups to apply to the ENI. source_dest_check Boolean specifying whether source/destination checking is enabled on the ENI. allocate_eip allocate and associate an EIP to the ENI. Could be 'standard' to allocate Elastic IP to EC2 region or 'vpc' to get it for a particular VPC .. versionchanged:: 2016.11.0 arecords A list of arecord dicts with attributes needed for the DNS add_record state. 
By default the boto_route53.add_record state will be used, which requires: name, zone, ttl, and identifier. See the boto_route53 state for information about these attributes. Other DNS modules can be called by specifying the provider keyword. By default, the private ENI IP address will be used, set 'public: True' in the arecord dict to use the ENI's public IP address .. versionadded:: 2016.3.0 region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' if not salt.utils.data.exactly_one((subnet_id, subnet_name)): raise SaltInvocationError('One (but not both) of subnet_id or ' 'subnet_name must be provided.') if not groups: raise SaltInvocationError('groups is a required argument.') if not isinstance(groups, list): raise SaltInvocationError('groups must be a list.') if not isinstance(source_dest_check, bool): raise SaltInvocationError('source_dest_check must be a bool.') ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} r = __salt__['boto_ec2.get_network_interface']( name=name, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in r: ret['result'] = False ret['comment'] = 'Error when attempting to find eni: {0}.'.format( r['error']['message'] ) return ret if not r['result']: if __opts__['test']: ret['comment'] = 'ENI is set to be created.' 
if allocate_eip: ret['comment'] = ' '.join([ret['comment'], 'An EIP is set to be allocated/assocaited to the ENI.']) if arecords: ret['comment'] = ' '.join([ret['comment'], 'A records are set to be created.']) ret['result'] = None return ret result_create = __salt__['boto_ec2.create_network_interface']( name, subnet_id=subnet_id, subnet_name=subnet_name, private_ip_address=private_ip_address, description=description, groups=groups, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_create: ret['result'] = False ret['comment'] = 'Failed to create ENI: {0}'.format( result_create['error']['message'] ) return ret r['result'] = result_create['result'] ret['comment'] = 'Created ENI {0}'.format(name) ret['changes']['id'] = r['result']['id'] else: _ret = _eni_attribute( r['result'], 'description', description, region, key, keyid, profile ) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = _ret['comment'] if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _eni_groups( r['result'], groups, region, key, keyid, profile ) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret # Actions that need to occur whether creating or updating _ret = _eni_attribute( r['result'], 'source_dest_check', source_dest_check, region, key, keyid, profile ) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] return ret if allocate_eip: if 'allocationId' not in r['result']: if __opts__['test']: ret['comment'] = ' '.join([ret['comment'], 'An EIP is set to be allocated and assocaited to the ENI.']) else: domain = 'vpc' if allocate_eip == 'vpc' else None eip_alloc = 
__salt__['boto_ec2.allocate_eip_address'](domain=domain, region=region, key=key, keyid=keyid, profile=profile) if eip_alloc: _ret = __salt__['boto_ec2.associate_eip_address'](instance_id=None, instance_name=None, public_ip=None, allocation_id=eip_alloc['allocation_id'], network_interface_id=r['result']['id'], private_ip_address=None, allow_reassociation=False, region=region, key=key, keyid=keyid, profile=profile) if not _ret: _ret = __salt__['boto_ec2.release_eip_address'](public_ip=None, allocation_id=eip_alloc['allocation_id'], region=region, key=key, keyid=keyid, profile=profile) ret['result'] = False msg = 'Failed to assocaite the allocated EIP address with the ENI. The EIP {0}'.format('was successfully released.' if _ret else 'was NOT RELEASED.') ret['comment'] = ' '.join([ret['comment'], msg]) return ret else: ret['result'] = False ret['comment'] = ' '.join([ret['comment'], 'Failed to allocate an EIP address']) return ret else: ret['comment'] = ' '.join([ret['comment'], 'An EIP is already allocated/assocaited to the ENI']) if arecords: for arecord in arecords: if 'name' not in arecord: msg = 'The arecord must contain a "name" property.' raise SaltInvocationError(msg) log.debug('processing arecord %s', arecord) _ret = None dns_provider = 'boto_route53' arecord['record_type'] = 'A' public_ip_arecord = False if 'public' in arecord: public_ip_arecord = arecord.pop('public') if public_ip_arecord: if 'publicIp' in r['result']: arecord['value'] = r['result']['publicIp'] elif 'public_ip' in eip_alloc: arecord['value'] = eip_alloc['public_ip'] else: msg = 'Unable to add an A record for the public IP address, a public IP address does not seem to be allocated to this ENI.' 
raise CommandExecutionError(msg) else: arecord['value'] = r['result']['private_ip_address'] if 'provider' in arecord: dns_provider = arecord.pop('provider') if dns_provider == 'boto_route53': if 'profile' not in arecord: arecord['profile'] = profile if 'key' not in arecord: arecord['key'] = key if 'keyid' not in arecord: arecord['keyid'] = keyid if 'region' not in arecord: arecord['region'] = region _ret = __states__['.'.join([dns_provider, 'present'])](**arecord) log.debug('ret from dns_provider.present = %s', _ret) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret return ret def _eni_attribute(metadata, attr, value, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} if metadata[attr] == value: return ret if __opts__['test']: ret['comment'] = 'ENI set to have {0} updated.'.format(attr) ret['result'] = None return ret result_update = __salt__['boto_ec2.modify_network_interface_attribute']( network_interface_id=metadata['id'], attr=attr, value=value, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_update: msg = 'Failed to update ENI {0}: {1}.' ret['result'] = False ret['comment'] = msg.format(attr, result_update['error']['message']) else: ret['comment'] = 'Updated ENI {0}.'.format(attr) ret['changes'][attr] = { 'old': metadata[attr], 'new': value } return ret def _eni_groups(metadata, groups, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} group_ids = [g['id'] for g in metadata['groups']] group_ids.sort() _groups = __salt__['boto_secgroup.convert_to_group_ids']( groups, vpc_id=metadata['vpc_id'], region=region, key=key, keyid=keyid, profile=profile ) if not _groups: ret['comment'] = 'Could not find secgroup ids for provided groups.' 
ret['result'] = False _groups.sort() if group_ids == _groups: return ret if __opts__['test']: ret['comment'] = 'ENI set to have groups updated.' ret['result'] = None return ret result_update = __salt__['boto_ec2.modify_network_interface_attribute']( network_interface_id=metadata['id'], attr='groups', value=_groups, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_update: msg = 'Failed to update ENI groups: {1}.' ret['result'] = False ret['comment'] = msg.format(result_update['error']['message']) else: ret['comment'] = 'Updated ENI groups.' ret['changes']['groups'] = { 'old': group_ids, 'new': _groups } return ret def eni_absent( name, release_eip=False, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 ENI is absent. .. versionadded:: 2016.3.0 name Name tag associated with the ENI. release_eip True/False - release any EIP associated with the ENI region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} r = __salt__['boto_ec2.get_network_interface']( name=name, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in r: ret['result'] = False ret['comment'] = 'Error when attempting to find eni: {0}.'.format( r['error']['message'] ) return ret if not r['result']: if __opts__['test']: ret['comment'] = 'ENI is set to be deleted.' ret['result'] = None return ret else: if __opts__['test']: ret['comment'] = 'ENI is set to be deleted.' 
if release_eip and 'allocationId' in r['result']: ret['comment'] = ' '.join([ret['comment'], 'Allocated/associated EIP is set to be released']) ret['result'] = None return ret if 'id' in r['result']['attachment']: result_detach = __salt__['boto_ec2.detach_network_interface']( name=name, force=True, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_detach: ret['result'] = False ret['comment'] = 'Failed to detach ENI: {0}'.format( result_detach['error']['message'] ) return ret # TODO: Ensure the detach occurs before continuing result_delete = __salt__['boto_ec2.delete_network_interface']( name=name, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_delete: ret['result'] = False ret['comment'] = 'Failed to delete ENI: {0}'.format( result_delete['error']['message'] ) return ret ret['comment'] = 'Deleted ENI {0}'.format(name) ret['changes']['id'] = None if release_eip and 'allocationId' in r['result']: _ret = __salt__['boto_ec2.release_eip_address'](public_ip=None, allocation_id=r['result']['allocationId'], region=region, key=key, keyid=keyid, profile=profile) if not _ret: ret['comment'] = ' '.join([ret['comment'], 'Failed to release EIP allocated to the ENI.']) ret['result'] = False return ret else: ret['comment'] = ' '.join([ret['comment'], 'EIP released.']) ret['changes']['eip released'] = True return ret def snapshot_created(name, ami_name, instance_name, wait_until_available=True, wait_timeout_seconds=300, **kwargs): ''' Create a snapshot from the given instance .. 
versionadded:: 2016.3.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } if not __salt__['boto_ec2.create_image'](ami_name=ami_name, instance_name=instance_name, **kwargs): ret['comment'] = 'Failed to create new AMI {ami_name}'.format(ami_name=ami_name) ret['result'] = False return ret ret['comment'] = 'Created new AMI {ami_name}'.format(ami_name=ami_name) ret['changes']['new'] = {ami_name: ami_name} if not wait_until_available: return ret starttime = time() while True: images = __salt__['boto_ec2.find_images'](ami_name=ami_name, return_objs=True, **kwargs) if images and images[0].state == 'available': break if time() - starttime > wait_timeout_seconds: if images: ret['comment'] = 'AMI still in state {state} after timeout'.format(state=images[0].state) else: ret['comment'] = 'AMI with name {ami_name} not found after timeout.'.format(ami_name=ami_name) ret['result'] = False return ret sleep(5) return ret def instance_present(name, instance_name=None, instance_id=None, image_id=None, image_name=None, tags=None, key_name=None, security_groups=None, user_data=None, instance_type=None, placement=None, kernel_id=None, ramdisk_id=None, vpc_id=None, vpc_name=None, monitoring_enabled=None, subnet_id=None, subnet_name=None, private_ip_address=None, block_device_map=None, disable_api_termination=None, instance_initiated_shutdown_behavior=None, placement_group=None, client_token=None, security_group_ids=None, security_group_names=None, additional_info=None, tenancy=None, instance_profile_arn=None, instance_profile_name=None, ebs_optimized=None, network_interfaces=None, network_interface_name=None, network_interface_id=None, attributes=None, target_state=None, public_ip=None, allocation_id=None, allocate_eip=False, region=None, key=None, keyid=None, profile=None): ### TODO - implement 'target_state={running, stopped}' ''' Ensure an EC2 instance is running with the given attributes and state. name (string) - The name of the state definition. 
def instance_present(name, instance_name=None, instance_id=None, image_id=None,
                     image_name=None, tags=None, key_name=None,
                     security_groups=None, user_data=None, instance_type=None,
                     placement=None, kernel_id=None, ramdisk_id=None,
                     vpc_id=None, vpc_name=None, monitoring_enabled=None,
                     subnet_id=None, subnet_name=None, private_ip_address=None,
                     block_device_map=None, disable_api_termination=None,
                     instance_initiated_shutdown_behavior=None,
                     placement_group=None, client_token=None,
                     security_group_ids=None, security_group_names=None,
                     additional_info=None, tenancy=None,
                     instance_profile_arn=None, instance_profile_name=None,
                     ebs_optimized=None, network_interfaces=None,
                     network_interface_name=None, network_interface_id=None,
                     attributes=None, target_state=None, public_ip=None,
                     allocation_id=None, allocate_eip=False, region=None,
                     key=None, keyid=None, profile=None):
    ### TODO - implement 'target_state={running, stopped}'
    '''
    Ensure an EC2 instance is running with the given attributes and state.

    name
        (string) - The name of the state definition.  Recommended that this
        match the instance_name attribute (generally the FQDN of the instance).
    instance_name
        (string) - The name of the instance, generally its FQDN.  Exclusive
        with 'instance_id'.
    instance_id
        (string) - The ID of the instance (if known).  Exclusive with
        'instance_name'.
    image_id
        (string) - The ID of the AMI image to run.
    image_name
        (string) - The name of the AMI image to run.
    tags
        (dict) - Tags to apply to the instance.
    key_name
        (string) - The name of the key pair with which to launch instances.
    security_groups
        (list of strings) - The names of the EC2 classic security groups with
        which to associate instances.
    user_data
        (string) - The Base64-encoded MIME user data to be made available to
        the instance(s) in this reservation.
    instance_type
        (string) - The EC2 instance size/type.  Note that only certain types
        are compatible with HVM based AMIs.
    placement
        (string) - The Availability Zone to launch the instance into.
    kernel_id
        (string) - The ID of the kernel with which to launch the instances.
    ramdisk_id
        (string) - The ID of the RAM disk with which to launch the instances.
    vpc_id
        (string) - The ID of a VPC to attach the instance to.
    vpc_name
        (string) - The name of a VPC to attach the instance to.
    monitoring_enabled
        (bool) - Enable detailed CloudWatch monitoring on the instance.
    subnet_id
        (string) - The ID of the subnet within which to launch the instances
        for VPC.
    subnet_name
        (string) - The name of the subnet within which to launch the instances
        for VPC.
    private_ip_address
        (string) - If you're using VPC, you can optionally use this parameter
        to assign the instance a specific available IP address from the subnet
        (e.g., 10.0.0.25).
    block_device_map
        (boto.ec2.blockdevicemapping.BlockDeviceMapping) - A BlockDeviceMapping
        data structure describing the EBS volumes associated with the Image.
    disable_api_termination
        (bool) - If True, the instances will be locked and will not be able to
        be terminated via the API.
    instance_initiated_shutdown_behavior
        (string) - Specifies whether the instance stops or terminates on
        instance-initiated shutdown.  Valid values are: 'stop', 'terminate'.
    placement_group
        (string) - If specified, this is the name of the placement group in
        which the instance(s) will be launched.
    client_token
        (string) - Unique, case-sensitive identifier you provide to ensure
        idempotency of the request.  Maximum 64 ASCII characters.
    security_group_ids
        (list of strings) - The IDs of the VPC security groups with which to
        associate instances.
    security_group_names
        (list of strings) - The names of the VPC security groups with which to
        associate instances.
    additional_info
        (string) - Specifies additional information to make available to the
        instance(s).
    tenancy
        (string) - The tenancy of the instance you want to launch.  An instance
        with a tenancy of 'dedicated' runs on single-tenant hardware and can
        only be launched into a VPC.  Valid values are: 'default' or
        'dedicated'.  NOTE: To use dedicated tenancy you MUST specify a VPC
        subnet-ID as well.
    instance_profile_arn
        (string) - The Amazon resource name (ARN) of the IAM Instance Profile
        (IIP) to associate with the instances.
    instance_profile_name
        (string) - The name of the IAM Instance Profile (IIP) to associate with
        the instances.
    ebs_optimized
        (bool) - Whether the instance is optimized for EBS I/O.
    network_interfaces
        (boto.ec2.networkinterface.NetworkInterfaceCollection) - A
        NetworkInterfaceCollection data structure containing the ENI
        specifications for the instance.
    network_interface_name
        (string) - The name of Elastic Network Interface to attach

        .. versionadded:: 2016.11.0
    network_interface_id
        (string) - The id of Elastic Network Interface to attach

        .. versionadded:: 2016.11.0
    attributes
        (dict) - Instance attributes and values to be applied to the instance.
        Available options are: instanceType, kernel, ramdisk, userData,
        disableApiTermination, instanceInitiatedShutdownBehavior,
        blockDeviceMapping, sourceDestCheck, groupSet, ebsOptimized,
        sriovNetSupport.
    target_state
        (string) - The desired target state of the instance ('running' or
        'stopped').  Note that this option is currently UNIMPLEMENTED.
    public_ip
        (string) - The IP of a previously allocated EIP address, which will be
        attached to the instance.  EC2 Classic instances ONLY - for VPC pass in
        an allocation_id instead.
    allocation_id
        (string) - The ID of a previously allocated EIP address, which will be
        attached to the instance.  VPC instances ONLY - for Classic pass in a
        public_ip instead.
    allocate_eip
        (bool) - Allocate and attach an EIP on-the-fly for this instance.  Note
        you'll want to release this address when terminating the instance,
        either manually or via the 'release_eip' flag to 'instance_absent'.
    region
        (string) - Region to connect to.
    key
        (string) - Secret key to be used.
    keyid
        (string) - Access key to be used.
    profile
        (variable) - A dict with region, key and keyid, or a pillar key
        (string) that contains a dict with region, key and keyid.

    .. versionadded:: 2016.3.0
    '''
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}
           }
    _create = False
    running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped')
    changed_attrs = {}

    if not salt.utils.data.exactly_one((image_id, image_name)):
        raise SaltInvocationError('Exactly one of image_id OR '
                                  'image_name must be provided.')
    if (public_ip or allocation_id or allocate_eip) and not salt.utils.data.exactly_one((public_ip, allocation_id, allocate_eip)):
        raise SaltInvocationError('At most one of public_ip, allocation_id OR '
                                  'allocate_eip may be provided.')

    # Resolve the instance by id or by Name tag; decide whether we must create.
    if instance_id:
        exists = __salt__['boto_ec2.exists'](instance_id=instance_id, region=region, key=key,
                                             keyid=keyid, profile=profile, in_states=running_states)
        if not exists:
            _create = True
    else:
        instances = __salt__['boto_ec2.find_instances'](name=instance_name if instance_name else name,
                                                        region=region, key=key, keyid=keyid,
                                                        profile=profile, in_states=running_states)
        if not instances:
            _create = True
        elif len(instances) > 1:
            log.debug('Multiple instances matching criteria found - cannot determine a singular instance-id')
            instance_id = None  # No way to know, we'll just have to bail later....
        else:
            instance_id = instances[0]

    if _create:
        if __opts__['test']:
            ret['comment'] = 'The instance {0} is set to be created.'.format(name)
            ret['result'] = None
            return ret
        if image_name:
            # Resolve an AMI name to an id; fall back to the raw name.
            args = {'ami_name': image_name, 'region': region, 'key': key,
                    'keyid': keyid, 'profile': profile}
            image_ids = __salt__['boto_ec2.find_images'](**args)
            if image_ids:
                image_id = image_ids[0]
            else:
                image_id = image_name
        r = __salt__['boto_ec2.run'](image_id, instance_name if instance_name else name,
                                     tags=tags, key_name=key_name,
                                     security_groups=security_groups, user_data=user_data,
                                     instance_type=instance_type, placement=placement,
                                     kernel_id=kernel_id, ramdisk_id=ramdisk_id, vpc_id=vpc_id,
                                     vpc_name=vpc_name, monitoring_enabled=monitoring_enabled,
                                     subnet_id=subnet_id, subnet_name=subnet_name,
                                     private_ip_address=private_ip_address,
                                     block_device_map=block_device_map,
                                     disable_api_termination=disable_api_termination,
                                     instance_initiated_shutdown_behavior=instance_initiated_shutdown_behavior,
                                     placement_group=placement_group, client_token=client_token,
                                     security_group_ids=security_group_ids,
                                     security_group_names=security_group_names,
                                     additional_info=additional_info, tenancy=tenancy,
                                     instance_profile_arn=instance_profile_arn,
                                     instance_profile_name=instance_profile_name,
                                     ebs_optimized=ebs_optimized,
                                     network_interfaces=network_interfaces,
                                     network_interface_name=network_interface_name,
                                     network_interface_id=network_interface_id,
                                     region=region, key=key, keyid=keyid, profile=profile)
        if not r or 'instance_id' not in r:
            ret['result'] = False
            ret['comment'] = 'Failed to create instance {0}.'.format(instance_name if instance_name else name)
            return ret

        instance_id = r['instance_id']
        ret['changes'] = {'old': {}, 'new': {}}
        ret['changes']['old']['instance_id'] = None
        ret['changes']['new']['instance_id'] = instance_id

        # To avoid issues we only allocate new EIPs at instance creation.
        # This might miss situations where an instance is initially created
        # without one and one is added later, but the alternative is the
        # risk of EIPs allocated at every state run.
        if allocate_eip:
            if __opts__['test']:
                ret['comment'] = 'New EIP would be allocated.'
                ret['result'] = None
                return ret
            domain = 'vpc' if vpc_id or vpc_name else None
            r = __salt__['boto_ec2.allocate_eip_address'](
                    domain=domain, region=region, key=key, keyid=keyid,
                    profile=profile)
            if not r:
                ret['result'] = False
                ret['comment'] = 'Failed to allocate new EIP.'
                return ret
            allocation_id = r['allocation_id']
            log.info("New EIP with address %s allocated.", r['public_ip'])
        else:
            log.info("EIP not requested.")

    if public_ip or allocation_id:
        # This can take a bit to show up, give it a chance to...
        tries = 10
        secs = 3
        for t in range(tries):
            r = __salt__['boto_ec2.get_eip_address_info'](
                    addresses=public_ip, allocation_ids=allocation_id,
                    region=region, key=key, keyid=keyid, profile=profile)
            if r:
                break
            else:
                log.info(
                    'Waiting up to %s secs for new EIP %s to become available',
                    tries * secs, public_ip or allocation_id
                )
                # The module does 'from time import time, sleep' (see
                # snapshot_created), so the bare name must be used here;
                # 'time.sleep(secs)' raised AttributeError on the time()
                # function.
                sleep(secs)
        if not r:
            ret['result'] = False
            ret['comment'] = 'Failed to lookup EIP {0}.'.format(public_ip or allocation_id)
            return ret
        ip = r[0]['public_ip']
        if r[0].get('instance_id'):
            # The EIP is already bound somewhere; only OK if it's bound to us.
            if r[0]['instance_id'] != instance_id:
                ret['result'] = False
                ret['comment'] = ('EIP {0} is already associated with instance '
                                  '{1}.'.format(public_ip if public_ip else allocation_id,
                                                r[0]['instance_id']))
                return ret
        else:
            if __opts__['test']:
                ret['comment'] = 'Instance {0} to be updated.'.format(name)
                ret['result'] = None
                return ret
            r = __salt__['boto_ec2.associate_eip_address'](
                    instance_id=instance_id, public_ip=public_ip,
                    allocation_id=allocation_id, region=region, key=key,
                    keyid=keyid, profile=profile)
            if r:
                if 'new' not in ret['changes']:
                    ret['changes']['new'] = {}
                ret['changes']['new']['public_ip'] = ip
            else:
                ret['result'] = False
                ret['comment'] = 'Failed to attach EIP to instance {0}.'.format(
                        instance_name if instance_name else name)
                return ret

    if attributes:
        for k, v in six.iteritems(attributes):
            curr = __salt__['boto_ec2.get_attribute'](k, instance_id=instance_id, region=region,
                                                      key=key, keyid=keyid, profile=profile)
            # get_attribute can return non-dict error values; normalize.
            curr = {} if not isinstance(curr, dict) else curr
            if curr.get(k) == v:
                continue
            else:
                if __opts__['test']:
                    changed_attrs[k] = 'The instance attribute {0} is set to be changed from \'{1}\' to \'{2}\'.'.format(
                        k, curr.get(k), v)
                    continue
                try:
                    r = __salt__['boto_ec2.set_attribute'](attribute=k, attribute_value=v,
                                                           instance_id=instance_id, region=region,
                                                           key=key, keyid=keyid, profile=profile)
                except SaltInvocationError:
                    ret['result'] = False
                    ret['comment'] = 'Failed to set attribute {0} to {1} on instance {2}.'.format(k, v, instance_name)
                    return ret
                ret['changes'] = ret['changes'] if ret['changes'] else {'old': {}, 'new': {}}
                ret['changes']['old'][k] = curr.get(k)
                ret['changes']['new'][k] = v

    if __opts__['test']:
        if changed_attrs:
            ret['changes']['new'] = changed_attrs
            ret['result'] = None
        else:
            ret['comment'] = 'Instance {0} is in the correct state'.format(instance_name if instance_name else name)
            ret['result'] = True

    if tags and instance_id is not None:
        tags = dict(tags)
        curr_tags = dict(__salt__['boto_ec2.get_all_tags'](filters={'resource-id': instance_id},
                                                           region=region, key=key, keyid=keyid,
                                                           profile=profile).get(instance_id, {}))
        current = set(curr_tags.keys())
        desired = set(tags.keys())
        remove = list(current - desired)  # Boto explicitly requires a list here and can't cope with a set...
        add = dict([(t, tags[t]) for t in desired - current])
        replace = dict([(t, tags[t]) for t in tags if tags.get(t) != curr_tags.get(t)])
        # Tag keys are unique despite the bizarre semantics uses which make it LOOK like they could be duplicative.
        add.update(replace)
        if add or remove:
            if __opts__['test']:
                ret['changes']['old'] = ret['changes']['old'] if 'old' in ret['changes'] else {}
                ret['changes']['new'] = ret['changes']['new'] if 'new' in ret['changes'] else {}
                ret['changes']['old']['tags'] = curr_tags
                ret['changes']['new']['tags'] = tags
                ret['comment'] += ' Tags would be updated on instance {0}.'.format(
                        instance_name if instance_name else name)
            else:
                if remove:
                    if not __salt__['boto_ec2.delete_tags'](resource_ids=instance_id, tags=remove,
                                                            region=region, key=key, keyid=keyid,
                                                            profile=profile):
                        msg = "Error while deleting tags on instance {0}".format(
                                instance_name if instance_name else name)
                        log.error(msg)
                        ret['comment'] += ' ' + msg
                        ret['result'] = False
                        return ret
                if add:
                    if not __salt__['boto_ec2.create_tags'](resource_ids=instance_id, tags=add,
                                                            region=region, key=key, keyid=keyid,
                                                            profile=profile):
                        msg = "Error while creating tags on instance {0}".format(
                                instance_name if instance_name else name)
                        log.error(msg)
                        ret['comment'] += ' ' + msg
                        ret['result'] = False
                        return ret
                ret['changes']['old'] = ret['changes']['old'] if 'old' in ret['changes'] else {}
                ret['changes']['new'] = ret['changes']['new'] if 'new' in ret['changes'] else {}
                ret['changes']['old']['tags'] = curr_tags
                ret['changes']['new']['tags'] = tags
    return ret
def instance_absent(name, instance_name=None, instance_id=None,
                    release_eip=False, region=None, key=None, keyid=None,
                    profile=None, filters=None):
    '''
    Ensure an EC2 instance does not exist (is stopped and removed).

    .. versionchanged:: 2016.11.0

    name
        (string) - The name of the state definition.
    instance_name
        (string) - The name of the instance.
    instance_id
        (string) - The ID of the instance.
    release_eip
        (bool)   - Release any associated EIPs during termination.
    region
        (string) - Region to connect to.
    key
        (string) - Secret key to be used.
    keyid
        (string) - Access key to be used.
    profile
        (variable) - A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    filters
        (dict) - A dict of additional filters to use in matching the instance to
        delete.

    YAML example fragment:

    .. code-block:: yaml

        - filters:
            vpc-id: vpc-abcdef12
    '''
    ### TODO - Implement 'force' option??  Would automagically turn off
    ### 'disableApiTermination', as needed, before trying to delete.
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}
           }
    running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped')

    if not instance_id:
        # Resolve the instance id from its Name tag; an ambiguous or failed
        # lookup yields result None ("status unknown") rather than an error.
        try:
            instance_id = __salt__['boto_ec2.get_id'](name=instance_name if instance_name else name,
                                                      region=region, key=key, keyid=keyid,
                                                      profile=profile, in_states=running_states,
                                                      filters=filters)
        except CommandExecutionError as e:
            ret['result'] = None
            ret['comment'] = ("Couldn't determine current status of instance "
                              "{0}.".format(instance_name or name))
            return ret

    instances = __salt__['boto_ec2.find_instances'](instance_id=instance_id, region=region,
                                                    key=key, keyid=keyid, profile=profile,
                                                    return_objs=True, filters=filters)
    if not instances:
        ret['result'] = True
        ret['comment'] = 'Instance {0} is already gone.'.format(instance_id)
        return ret
    instance = instances[0]

    ### Honor 'disableApiTermination' - if you want to override it, first use set_attribute() to turn it off
    no_can_do = __salt__['boto_ec2.get_attribute']('disableApiTermination', instance_id=instance_id,
                                                   region=region, key=key, keyid=keyid, profile=profile)
    if no_can_do.get('disableApiTermination') is True:
        ret['result'] = False
        ret['comment'] = 'Termination of instance {0} via the API is disabled.'.format(instance_id)
        return ret

    if __opts__['test']:
        ret['comment'] = 'The instance {0} is set to be deleted.'.format(name)
        ret['result'] = None
        return ret

    r = __salt__['boto_ec2.terminate'](instance_id=instance_id, name=instance_name,
                                       region=region, key=key, keyid=keyid, profile=profile)
    if not r:
        ret['result'] = False
        ret['comment'] = 'Failed to terminate instance {0}.'.format(instance_id)
        return ret

    ret['changes']['old'] = {'instance_id': instance_id}
    ret['changes']['new'] = None

    if release_eip:
        ip = getattr(instance, 'ip_address', None)
        if ip:
            base_args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile}
            public_ip = None
            alloc_id = None
            assoc_id = None
            # VPC EIPs are released by allocation_id; EC2 Classic by public_ip.
            if getattr(instance, 'vpc_id', None):
                r = __salt__['boto_ec2.get_eip_address_info'](addresses=ip, **base_args)
                if r and 'allocation_id' in r[0]:
                    alloc_id = r[0]['allocation_id']
                    assoc_id = r[0].get('association_id')
                else:
                    # I /believe/ this situation is impossible but let's hedge our bets...
                    ret['result'] = False
                    ret['comment'] = "Can't determine AllocationId for address {0}.".format(ip)
                    return ret
            else:
                public_ip = instance.ip_address

            if assoc_id:
                # Race here - sometimes the terminate above will already have dropped this
                if not __salt__['boto_ec2.disassociate_eip_address'](association_id=assoc_id,
                                                                     **base_args):
                    log.warning("Failed to disassociate EIP %s.", ip)

            # NOTE: 'r[0]' below is only evaluated on the VPC path, where 'r'
            # holds the get_eip_address_info result ('public_ip' short-circuits
            # the 'or' on the Classic path).
            if __salt__['boto_ec2.release_eip_address'](allocation_id=alloc_id, public_ip=public_ip,
                                                        **base_args):
                log.info("Released EIP address %s", public_ip or r[0]['public_ip'])
                ret['changes']['old']['public_ip'] = public_ip or r[0]['public_ip']
            else:
                ret['result'] = False
                ret['comment'] = "Failed to release EIP {0}.".format(ip)
                return ret

    return ret
def volume_absent(name, volume_name=None, volume_id=None, instance_name=None,
                  instance_id=None, device=None, region=None, key=None, keyid=None,
                  profile=None):
    '''
    Ensure the EC2 volume is detached and absent.

    .. versionadded:: 2016.11.0

    name
        State definition name.

    volume_name
        Name tag associated with the volume.  For safety, if this matches more than
        one volume, the state will refuse to apply.

    volume_id
        Resource ID of the volume.

    instance_name
        Only remove volume if it is attached to instance with this Name tag.
        Exclusive with 'instance_id'.  Requires 'device'.

    instance_id
        Only remove volume if it is attached to this instance.
        Exclusive with 'instance_name'.  Requires 'device'.

    device
        Match by device rather than ID.  Requires one of 'instance_name' or
        'instance_id'.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}
           }
    filters = {}
    running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped')

    if not salt.utils.data.exactly_one((volume_name, volume_id, instance_name, instance_id)):
        raise SaltInvocationError("Exactly one of 'volume_name', 'volume_id', "
                                  "'instance_name', or 'instance_id' must be provided.")
    if (instance_name or instance_id) and not device:
        raise SaltInvocationError("Parameter 'device' is required when either "
                                  "'instance_name' or 'instance_id' is specified.")
    # Accumulate an EC2 describe-volumes filter set from the provided args.
    if volume_id:
        filters.update({'volume-id': volume_id})
    if volume_name:
        filters.update({'tag:Name': volume_name})
    if instance_name:
        instance_id = __salt__['boto_ec2.get_id'](
                name=instance_name, region=region, key=key, keyid=keyid,
                profile=profile, in_states=running_states)
        if not instance_id:
            # Instance gone => nothing can still be attached; treat as success.
            ret['comment'] = ('Instance with Name {0} not found.  Assuming '
                              'associated volumes gone.'.format(instance_name))
            return ret
    if instance_id:
        filters.update({'attachment.instance-id': instance_id})
    if device:
        filters.update({'attachment.device': device})

    args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile}

    vols = __salt__['boto_ec2.get_all_volumes'](filters=filters, **args)
    if not vols:
        ret['comment'] = 'Volume matching criteria not found, assuming already absent'
        return ret
    if len(vols) > 1:
        # Refuse to guess which volume was meant - fail loudly instead.
        msg = "More than one volume matched criteria, can't continue in state {0}".format(name)
        log.error(msg)
        ret['comment'] = msg
        ret['result'] = False
        return ret
    vol = vols[0]
    log.info('Matched Volume ID %s', vol)

    if __opts__['test']:
        ret['comment'] = 'The volume {0} is set to be deleted.'.format(vol)
        ret['result'] = None
        return ret
    # force=True detaches the volume first if it is still attached.
    if __salt__['boto_ec2.delete_volume'](volume_id=vol, force=True, **args):
        ret['comment'] = 'Volume {0} deleted.'.format(vol)
        ret['changes'] = {'old': {'volume_id': vol}, 'new': {'volume_id': None}}
    else:
        ret['comment'] = 'Error deleting volume {0}.'.format(vol)
        ret['result'] = False
    return ret
Assuming ' 'associated volumes gone.'.format(instance_name)) return ret if instance_id: filters.update({'attachment.instance-id': instance_id}) if device: filters.update({'attachment.device': device}) args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile} vols = __salt__['boto_ec2.get_all_volumes'](filters=filters, **args) if not vols: ret['comment'] = 'Volume matching criteria not found, assuming already absent' return ret if len(vols) > 1: msg = "More than one volume matched criteria, can't continue in state {0}".format(name) log.error(msg) ret['comment'] = msg ret['result'] = False return ret vol = vols[0] log.info('Matched Volume ID %s', vol) if __opts__['test']: ret['comment'] = 'The volume {0} is set to be deleted.'.format(vol) ret['result'] = None return ret if __salt__['boto_ec2.delete_volume'](volume_id=vol, force=True, **args): ret['comment'] = 'Volume {0} deleted.'.format(vol) ret['changes'] = {'old': {'volume_id': vol}, 'new': {'volume_id': None}} else: ret['comment'] = 'Error deleting volume {0}.'.format(vol) ret['result'] = False return ret def volumes_tagged(name, tag_maps, authoritative=False, region=None, key=None, keyid=None, profile=None): ''' Ensure EC2 volume(s) matching the given filters have the defined tags. .. versionadded:: 2016.11.0 name State definition name. tag_maps List of dicts of filters and tags, where 'filters' is a dict suitable for passing to the 'filters' argument of boto_ec2.get_all_volumes(), and 'tags' is a dict of tags to be set on volumes as matched by the given filters. The filter syntax is extended to permit passing either a list of volume_ids or an instance_name (with instance_name being the Name tag of the instance to which the desired volumes are mapped). Each mapping in the list is applied separately, so multiple sets of volumes can be all tagged differently with one call to this function. YAML example fragment: .. 
code-block:: yaml - filters: attachment.instance_id: i-abcdef12 tags: Name: dev-int-abcdef12.aws-foo.com - filters: attachment.device: /dev/sdf tags: ManagedSnapshots: true BillingGroup: bubba.hotep@aws-foo.com - filters: instance_name: prd-foo-01.aws-foo.com tags: Name: prd-foo-01.aws-foo.com BillingGroup: infra-team@aws-foo.com - filters: volume_ids: [ vol-12345689, vol-abcdef12 ] tags: BillingGroup: infra-team@aws-foo.com authoritative Should un-declared tags currently set on matched volumes be deleted? Boolean. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } args = {'tag_maps': tag_maps, 'authoritative': authoritative, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile} if __opts__['test']: args['dry_run'] = True r = __salt__['boto_ec2.set_volumes_tags'](**args) if r['success']: if r.get('changes'): ret['comment'] = 'Tags would be updated.' ret['changes'] = r['changes'] ret['result'] = None else: ret['comment'] = 'Error validating requested volume tags.' ret['result'] = False return ret r = __salt__['boto_ec2.set_volumes_tags'](**args) if r['success']: if r.get('changes'): ret['comment'] = 'Tags applied.' ret['changes'] = r['changes'] else: ret['comment'] = 'Error updating requested volume tags.' ret['result'] = False return ret def volume_present(name, volume_name=None, volume_id=None, instance_name=None, instance_id=None, device=None, size=None, snapshot_id=None, volume_type=None, iops=None, encrypted=False, kms_key_id=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 volume is present and attached. .. name State definition name. volume_name The Name tag value for the volume. If no volume with that matching name tag is found, a new volume will be created. 
If multiple volumes are matched, the state will fail. volume_id Resource ID of the volume. Exclusive with 'volume_name'. instance_name Attach volume to instance with this Name tag. Exclusive with 'instance_id'. instance_id Attach volume to instance with this ID. Exclusive with 'instance_name'. device The device on the instance through which the volume is exposed (e.g. /dev/sdh) size The size of the new volume, in GiB. If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. snapshot_id The snapshot ID from which the new Volume will be created. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. volume_type The type of the volume. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. Valid volume types for AWS can be found here: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html iops The provisioned IOPS you want to associate with this volume. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. encrypted Specifies whether the volume should be encrypted. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. kms_key_id If encrypted is True, this KMS Key ID may be specified to encrypt volume with this key. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. e.g.: arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} old_dict = {} new_dict = {} running_states = ('running', 'stopped') if not salt.utils.data.exactly_one((volume_name, volume_id)): raise SaltInvocationError("Exactly one of 'volume_name', 'volume_id', " " must be provided.") if not salt.utils.data.exactly_one((instance_name, instance_id)): raise SaltInvocationError("Exactly one of 'instance_name', or 'instance_id'" " must be provided.") if device is None: raise SaltInvocationError("Parameter 'device' is required.") args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile} if instance_name: instance_id = __salt__['boto_ec2.get_id']( name=instance_name, in_states=running_states, **args) if not instance_id: raise SaltInvocationError('Instance with Name {0} not found.'.format(instance_name)) instances = __salt__['boto_ec2.find_instances'](instance_id=instance_id, return_objs=True, **args) instance = instances[0] if volume_name: filters = {} filters.update({'tag:Name': volume_name}) vols = __salt__['boto_ec2.get_all_volumes'](filters=filters, **args) if len(vols) > 1: msg = "More than one volume matched volume name {0}, can't continue in state {1}".format(volume_name, name) raise SaltInvocationError(msg) if not vols: if __opts__['test']: ret['comment'] = ('The volume with name {0} is set to be created and attached' ' on {1}({2}).'.format(volume_name, instance_id, device)) ret['result'] = None return ret _rt = __salt__['boto_ec2.create_volume'](zone_name=instance.placement, size=size, snapshot_id=snapshot_id, volume_type=volume_type, iops=iops, encrypted=encrypted, kms_key_id=kms_key_id, wait_for_creation=True, **args) if 'result' in _rt: volume_id = _rt['result'] else: raise SaltInvocationError('Error creating volume with name {0}.'.format(volume_name)) _rt = __salt__['boto_ec2.set_volumes_tags'](tag_maps=[{ 'filters': {'volume_ids': [volume_id]}, 'tags': {'Name': volume_name} }], **args) if _rt['success'] is False: raise 
SaltInvocationError('Error updating requested volume ' '{0} with name {1}. {2}'.format(volume_id, volume_name, _rt['comment'])) old_dict['volume_id'] = None new_dict['volume_id'] = volume_id else: volume_id = vols[0] vols = __salt__['boto_ec2.get_all_volumes'](volume_ids=[volume_id], return_objs=True, **args) if not vols: raise SaltInvocationError('Volume {0} do not exist'.format(volume_id)) vol = vols[0] if vol.zone != instance.placement: raise SaltInvocationError(('Volume {0} in {1} cannot attach to instance' ' {2} in {3}.').format(volume_id, vol.zone, instance_id, instance.placement)) attach_data = vol.attach_data if attach_data is not None and attach_data.instance_id is not None: if instance_id == attach_data.instance_id and device == attach_data.device: ret['comment'] = 'The volume {0} is attached on {1}({2}).'.format(volume_id, instance_id, device) return ret else: if __opts__['test']: ret['comment'] = ('The volume {0} is set to be detached' ' from {1}({2} and attached on {3}({4}).').format(attach_data.instance_id, attach_data.devic, volume_id, instance_id, device) ret['result'] = None return ret if __salt__['boto_ec2.detach_volume'](volume_id=volume_id, wait_for_detachement=True, **args): ret['comment'] = 'Volume {0} is detached from {1}({2}).'.format(volume_id, attach_data.instance_id, attach_data.device) old_dict['instance_id'] = attach_data.instance_id old_dict['device'] = attach_data.device else: raise SaltInvocationError(('The volume {0} is already attached on instance {1}({2}).' 
' Failed to detach').format(volume_id, attach_data.instance_id, attach_data.device)) else: old_dict['instance_id'] = instance_id old_dict['device'] = None if __opts__['test']: ret['comment'] = 'The volume {0} is set to be attached on {1}({2}).'.format(volume_id, instance_id, device) ret['result'] = None return ret if __salt__['boto_ec2.attach_volume'](volume_id=volume_id, instance_id=instance_id, device=device, **args): ret['comment'] = ' '.join([ ret['comment'], 'Volume {0} is attached on {1}({2}).'.format(volume_id, instance_id, device)]) new_dict['instance_id'] = instance_id new_dict['device'] = device ret['changes'] = {'old': old_dict, 'new': new_dict} else: ret['comment'] = 'Error attaching volume {0} to instance {1}({2}).'.format(volume_id, instance_id, device) ret['result'] = False return ret def private_ips_present(name, network_interface_name=None, network_interface_id=None, private_ip_addresses=None, allow_reassignment=False, region=None, key=None, keyid=None, profile=None): ''' Ensure an ENI has secondary private ip addresses associated with it name (String) - State definition name network_interface_id (String) - The EC2 network interface id, example eni-123456789 private_ip_addresses (List or String) - The secondary private ip address(es) that should be present on the ENI. allow_reassignment (Boolean) - If true, will reassign a secondary private ip address associated with another ENI. If false, state will fail if the secondary private ip address is associated with another ENI. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' if not salt.utils.data.exactly_one((network_interface_name, network_interface_id)): raise SaltInvocationError("Exactly one of 'network_interface_name', " "'network_interface_id' must be provided") if not private_ip_addresses: raise SaltInvocationError("You must provide the private_ip_addresses to associate with the " "ENI") ret = { 'name': name, 'result': True, 'comment': '', 'changes': {'old': [], 'new': []} } get_eni_args = { 'name': network_interface_name, 'network_interface_id': network_interface_id, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args) # Check if there are any new secondary private ips to add to the eni if eni and eni.get('result', {}).get('private_ip_addresses'): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['old'].append(eni_pip['private_ip_address']) ips_to_add = [] for private_ip in private_ip_addresses: if private_ip not in ret['changes']['old']: ips_to_add.append(private_ip) if ips_to_add: if not __opts__['test']: # Assign secondary private ips to ENI assign_ips_args = { 'network_interface_id': network_interface_id, 'private_ip_addresses': ips_to_add, 'allow_reassignment': allow_reassignment, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } __salt__['boto_ec2.assign_private_ip_addresses'](**assign_ips_args) # Verify secondary private ips were properly assigned to ENI eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args) if eni and eni.get('result', {}).get('private_ip_addresses', None): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['new'].append(eni_pip['private_ip_address']) ips_not_added = [] for private_ip in private_ip_addresses: if private_ip not in ret['changes']['new']: ips_not_added.append(private_ip) # Display results if ips_not_added: ret['result'] = False ret['comment'] = ('ips on eni: {0}\n' 'attempted to add: {1}\n' 'could not add the following ips: {2}\n'.format( '\n\t- 
' + '\n\t- '.join(ret['changes']['new']), '\n\t- ' + '\n\t- '.join(ips_to_add), '\n\t- ' + '\n\t- '.join(ips_not_added))) else: ret['comment'] = "added ips: {0}".format( '\n\t- ' + '\n\t- '.join(ips_to_add)) # Verify there were changes if ret['changes']['old'] == ret['changes']['new']: ret['changes'] = {} else: # Testing mode, show that there were ips to add ret['comment'] = ('ips on eni: {0}\n' 'ips that would be added: {1}\n'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old']), '\n\t- ' + '\n\t- '.join(ips_to_add))) ret['changes'] = {} ret['result'] = None else: ret['comment'] = 'ips on eni: {0}'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old'])) # there were no changes since we did not attempt to remove ips ret['changes'] = {} return ret def private_ips_absent(name, network_interface_name=None, network_interface_id=None, private_ip_addresses=None, region=None, key=None, keyid=None, profile=None): ''' Ensure an ENI does not have secondary private ip addresses associated with it name (String) - State definition name network_interface_id (String) - The EC2 network interface id, example eni-123456789 private_ip_addresses (List or String) - The secondary private ip address(es) that should be absent on the ENI. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' if not salt.utils.data.exactly_one((network_interface_name, network_interface_id)): raise SaltInvocationError("Exactly one of 'network_interface_name', " "'network_interface_id' must be provided") if not private_ip_addresses: raise SaltInvocationError("You must provide the private_ip_addresses to unassociate with " "the ENI") if not isinstance(private_ip_addresses, list): private_ip_addresses = [private_ip_addresses] ret = { 'name': name, 'result': True, 'comment': '', 'changes': {'new': [], 'old': []} } get_eni_args = { 'name': network_interface_name, 'network_interface_id': network_interface_id, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args) # Check if there are any old private ips to remove from the eni primary_private_ip = None if eni and eni.get('result', {}).get('private_ip_addresses'): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['old'].append(eni_pip['private_ip_address']) if eni_pip['primary']: primary_private_ip = eni_pip['private_ip_address'] ips_to_remove = [] for private_ip in private_ip_addresses: if private_ip in ret['changes']['old']: ips_to_remove.append(private_ip) if private_ip == primary_private_ip: ret['result'] = False ret['comment'] = ('You cannot unassign the primary private ip address ({0}) on an ' 'eni\n' 'ips on eni: {1}\n' 'attempted to remove: {2}\n'.format( primary_private_ip, '\n\t- ' + '\n\t- '.join(ret['changes']['old']), '\n\t- ' + '\n\t- '.join(private_ip_addresses))) ret['changes'] = {} return ret if ips_to_remove: if not __opts__['test']: # Unassign secondary private ips to ENI assign_ips_args = { 'network_interface_id': network_interface_id, 'private_ip_addresses': ips_to_remove, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } __salt__['boto_ec2.unassign_private_ip_addresses'](**assign_ips_args) # Verify secondary private ips were properly unassigned from ENI eni = 
__salt__['boto_ec2.get_network_interface'](**get_eni_args) if eni and eni.get('result', {}).get('private_ip_addresses', None): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['new'].append(eni_pip['private_ip_address']) ips_not_removed = [] for private_ip in private_ip_addresses: if private_ip in ret['changes']['new']: ips_not_removed.append(private_ip) if ips_not_removed: ret['result'] = False ret['comment'] = ('ips on eni: {0}\n' 'attempted to remove: {1}\n' 'could not remove the following ips: {2}\n'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['new']), '\n\t- ' + '\n\t- '.join(ips_to_remove), '\n\t- ' + '\n\t- '.join(ips_not_removed))) else: ret['comment'] = "removed ips: {0}".format('\n\t- ' + '\n\t- '.join(ips_to_remove)) # Verify there were changes if ret['changes']['old'] == ret['changes']['new']: ret['changes'] = {} else: # Testing mode, show that there were ips to remove ret['comment'] = ('ips on eni: {0}\n' 'ips that would be removed: {1}\n'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old']), '\n\t- ' + '\n\t- '.join(ips_to_remove))) ret['changes'] = {} ret['result'] = None else: ret['comment'] = 'ips on network interface: {0}'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old'])) # there were no changes since we did not attempt to remove ips ret['changes'] = {} return ret
saltstack/salt
salt/states/boto_ec2.py
key_absent
python
def key_absent(name, region=None, key=None, keyid=None, profile=None):
    '''
    Deletes a key pair, if it exists.

    name
        The name of the EC2 key pair to delete.
    region
        Region to connect to.
    key
        Secret key to be used.
    keyid
        Access key to be used.
    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
    '''
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}
           }
    exists = __salt__['boto_ec2.get_key'](name, region, key, keyid, profile)
    if exists:
        if __opts__['test']:
            ret['comment'] = 'The key {0} is set to be deleted.'.format(name)
            ret['result'] = None
            return ret
        deleted = __salt__['boto_ec2.delete_key'](name, region, key, keyid,
                                                  profile)
        # Fix: previously logged as 'exists is %s', which mislabeled the
        # value — this is the result of the delete call, not the lookup.
        log.debug('deleted is %s', deleted)
        if deleted:
            ret['result'] = True
            ret['comment'] = 'The key {0} is deleted.'.format(name)
            # Record the removed key name so the state reports a change.
            ret['changes']['old'] = name
        else:
            ret['result'] = False
            ret['comment'] = 'Could not delete key {0} '.format(name)
    else:
        # Nothing to do: absence is the desired state, so this is success.
        ret['result'] = True
        ret['comment'] = 'The key name {0} does not exist'.format(name)
    return ret
Deletes a key pair
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_ec2.py#L136-L165
null
# -*- coding: utf-8 -*- ''' Manage EC2 .. versionadded:: 2015.8.0 This module provides an interface to the Elastic Compute Cloud (EC2) service from AWS. The below code creates a key pair: .. code-block:: yaml create-key-pair: boto_ec2.key_present: - name: mykeypair - save_private: /root/ - region: eu-west-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs .. code-block:: yaml import-key-pair: boto_ec2.key_present: - name: mykeypair - upload_public: 'ssh-rsa AAAA' - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs You can also use salt:// in order to define the public key. .. code-block:: yaml import-key-pair: boto_ec2.key_present: - name: mykeypair - upload_public: salt://mybase/public_key.pub - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs The below code deletes a key pair: .. code-block:: yaml delete-key-pair: boto_ec2.key_absent: - name: mykeypair - region: eu-west-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging from time import time, sleep # Import salt libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin import salt.utils.data import salt.utils.dictupdate as dictupdate from salt.exceptions import SaltInvocationError, CommandExecutionError log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. ''' if 'boto_ec2.get_key' in __salt__: return 'boto_ec2' else: return False def key_present(name, save_private=None, upload_public=None, region=None, key=None, keyid=None, profile=None): ''' Ensure key pair is present. 
''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } exists = __salt__['boto_ec2.get_key'](name, region, key, keyid, profile) log.debug('exists is %s', exists) if upload_public is not None and 'salt://' in upload_public: try: upload_public = __salt__['cp.get_file_str'](upload_public) except IOError as e: log.debug(e) ret['comment'] = 'File {0} not found.'.format(upload_public) ret['result'] = False return ret if not exists: if __opts__['test']: ret['comment'] = 'The key {0} is set to be created.'.format(name) ret['result'] = None return ret if save_private and not upload_public: created = __salt__['boto_ec2.create_key']( name, save_private, region, key, keyid, profile ) if created: ret['result'] = True ret['comment'] = 'The key {0} is created.'.format(name) ret['changes']['new'] = created else: ret['result'] = False ret['comment'] = 'Could not create key {0} '.format(name) elif not save_private and upload_public: imported = __salt__['boto_ec2.import_key'](name, upload_public, region, key, keyid, profile) if imported: ret['result'] = True ret['comment'] = 'The key {0} is created.'.format(name) ret['changes']['old'] = None ret['changes']['new'] = imported else: ret['result'] = False ret['comment'] = 'Could not create key {0} '.format(name) else: ret['result'] = False ret['comment'] = 'You can either upload or download a private key ' else: ret['result'] = True ret['comment'] = 'The key name {0} already exists'.format(name) return ret def eni_present( name, subnet_id=None, subnet_name=None, private_ip_address=None, description=None, groups=None, source_dest_check=True, allocate_eip=None, arecords=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 ENI exists. .. versionadded:: 2016.3.0 name Name tag associated with the ENI. subnet_id The VPC subnet ID the ENI will exist within. subnet_name The VPC subnet name the ENI will exist within. private_ip_address The private ip address to use for this ENI. 
If this is not specified AWS will automatically assign a private IP address to the ENI. Must be specified at creation time; will be ignored afterward. description Description of the key. groups A list of security groups to apply to the ENI. source_dest_check Boolean specifying whether source/destination checking is enabled on the ENI. allocate_eip allocate and associate an EIP to the ENI. Could be 'standard' to allocate Elastic IP to EC2 region or 'vpc' to get it for a particular VPC .. versionchanged:: 2016.11.0 arecords A list of arecord dicts with attributes needed for the DNS add_record state. By default the boto_route53.add_record state will be used, which requires: name, zone, ttl, and identifier. See the boto_route53 state for information about these attributes. Other DNS modules can be called by specifying the provider keyword. By default, the private ENI IP address will be used, set 'public: True' in the arecord dict to use the ENI's public IP address .. versionadded:: 2016.3.0 region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' if not salt.utils.data.exactly_one((subnet_id, subnet_name)): raise SaltInvocationError('One (but not both) of subnet_id or ' 'subnet_name must be provided.') if not groups: raise SaltInvocationError('groups is a required argument.') if not isinstance(groups, list): raise SaltInvocationError('groups must be a list.') if not isinstance(source_dest_check, bool): raise SaltInvocationError('source_dest_check must be a bool.') ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} r = __salt__['boto_ec2.get_network_interface']( name=name, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in r: ret['result'] = False ret['comment'] = 'Error when attempting to find eni: {0}.'.format( r['error']['message'] ) return ret if not r['result']: if __opts__['test']: ret['comment'] = 'ENI is set to be created.' if allocate_eip: ret['comment'] = ' '.join([ret['comment'], 'An EIP is set to be allocated/assocaited to the ENI.']) if arecords: ret['comment'] = ' '.join([ret['comment'], 'A records are set to be created.']) ret['result'] = None return ret result_create = __salt__['boto_ec2.create_network_interface']( name, subnet_id=subnet_id, subnet_name=subnet_name, private_ip_address=private_ip_address, description=description, groups=groups, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_create: ret['result'] = False ret['comment'] = 'Failed to create ENI: {0}'.format( result_create['error']['message'] ) return ret r['result'] = result_create['result'] ret['comment'] = 'Created ENI {0}'.format(name) ret['changes']['id'] = r['result']['id'] else: _ret = _eni_attribute( r['result'], 'description', description, region, key, keyid, profile ) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = _ret['comment'] if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _eni_groups( r['result'], groups, region, key, keyid, profile ) ret['changes'] = 
dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret # Actions that need to occur whether creating or updating _ret = _eni_attribute( r['result'], 'source_dest_check', source_dest_check, region, key, keyid, profile ) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] return ret if allocate_eip: if 'allocationId' not in r['result']: if __opts__['test']: ret['comment'] = ' '.join([ret['comment'], 'An EIP is set to be allocated and assocaited to the ENI.']) else: domain = 'vpc' if allocate_eip == 'vpc' else None eip_alloc = __salt__['boto_ec2.allocate_eip_address'](domain=domain, region=region, key=key, keyid=keyid, profile=profile) if eip_alloc: _ret = __salt__['boto_ec2.associate_eip_address'](instance_id=None, instance_name=None, public_ip=None, allocation_id=eip_alloc['allocation_id'], network_interface_id=r['result']['id'], private_ip_address=None, allow_reassociation=False, region=region, key=key, keyid=keyid, profile=profile) if not _ret: _ret = __salt__['boto_ec2.release_eip_address'](public_ip=None, allocation_id=eip_alloc['allocation_id'], region=region, key=key, keyid=keyid, profile=profile) ret['result'] = False msg = 'Failed to assocaite the allocated EIP address with the ENI. The EIP {0}'.format('was successfully released.' if _ret else 'was NOT RELEASED.') ret['comment'] = ' '.join([ret['comment'], msg]) return ret else: ret['result'] = False ret['comment'] = ' '.join([ret['comment'], 'Failed to allocate an EIP address']) return ret else: ret['comment'] = ' '.join([ret['comment'], 'An EIP is already allocated/assocaited to the ENI']) if arecords: for arecord in arecords: if 'name' not in arecord: msg = 'The arecord must contain a "name" property.' 
raise SaltInvocationError(msg) log.debug('processing arecord %s', arecord) _ret = None dns_provider = 'boto_route53' arecord['record_type'] = 'A' public_ip_arecord = False if 'public' in arecord: public_ip_arecord = arecord.pop('public') if public_ip_arecord: if 'publicIp' in r['result']: arecord['value'] = r['result']['publicIp'] elif 'public_ip' in eip_alloc: arecord['value'] = eip_alloc['public_ip'] else: msg = 'Unable to add an A record for the public IP address, a public IP address does not seem to be allocated to this ENI.' raise CommandExecutionError(msg) else: arecord['value'] = r['result']['private_ip_address'] if 'provider' in arecord: dns_provider = arecord.pop('provider') if dns_provider == 'boto_route53': if 'profile' not in arecord: arecord['profile'] = profile if 'key' not in arecord: arecord['key'] = key if 'keyid' not in arecord: arecord['keyid'] = keyid if 'region' not in arecord: arecord['region'] = region _ret = __states__['.'.join([dns_provider, 'present'])](**arecord) log.debug('ret from dns_provider.present = %s', _ret) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret return ret def _eni_attribute(metadata, attr, value, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} if metadata[attr] == value: return ret if __opts__['test']: ret['comment'] = 'ENI set to have {0} updated.'.format(attr) ret['result'] = None return ret result_update = __salt__['boto_ec2.modify_network_interface_attribute']( network_interface_id=metadata['id'], attr=attr, value=value, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_update: msg = 'Failed to update ENI {0}: {1}.' 
ret['result'] = False ret['comment'] = msg.format(attr, result_update['error']['message']) else: ret['comment'] = 'Updated ENI {0}.'.format(attr) ret['changes'][attr] = { 'old': metadata[attr], 'new': value } return ret def _eni_groups(metadata, groups, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} group_ids = [g['id'] for g in metadata['groups']] group_ids.sort() _groups = __salt__['boto_secgroup.convert_to_group_ids']( groups, vpc_id=metadata['vpc_id'], region=region, key=key, keyid=keyid, profile=profile ) if not _groups: ret['comment'] = 'Could not find secgroup ids for provided groups.' ret['result'] = False _groups.sort() if group_ids == _groups: return ret if __opts__['test']: ret['comment'] = 'ENI set to have groups updated.' ret['result'] = None return ret result_update = __salt__['boto_ec2.modify_network_interface_attribute']( network_interface_id=metadata['id'], attr='groups', value=_groups, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_update: msg = 'Failed to update ENI groups: {1}.' ret['result'] = False ret['comment'] = msg.format(result_update['error']['message']) else: ret['comment'] = 'Updated ENI groups.' ret['changes']['groups'] = { 'old': group_ids, 'new': _groups } return ret def eni_absent( name, release_eip=False, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 ENI is absent. .. versionadded:: 2016.3.0 name Name tag associated with the ENI. release_eip True/False - release any EIP associated with the ENI region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} r = __salt__['boto_ec2.get_network_interface']( name=name, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in r: ret['result'] = False ret['comment'] = 'Error when attempting to find eni: {0}.'.format( r['error']['message'] ) return ret if not r['result']: if __opts__['test']: ret['comment'] = 'ENI is set to be deleted.' ret['result'] = None return ret else: if __opts__['test']: ret['comment'] = 'ENI is set to be deleted.' if release_eip and 'allocationId' in r['result']: ret['comment'] = ' '.join([ret['comment'], 'Allocated/associated EIP is set to be released']) ret['result'] = None return ret if 'id' in r['result']['attachment']: result_detach = __salt__['boto_ec2.detach_network_interface']( name=name, force=True, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_detach: ret['result'] = False ret['comment'] = 'Failed to detach ENI: {0}'.format( result_detach['error']['message'] ) return ret # TODO: Ensure the detach occurs before continuing result_delete = __salt__['boto_ec2.delete_network_interface']( name=name, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_delete: ret['result'] = False ret['comment'] = 'Failed to delete ENI: {0}'.format( result_delete['error']['message'] ) return ret ret['comment'] = 'Deleted ENI {0}'.format(name) ret['changes']['id'] = None if release_eip and 'allocationId' in r['result']: _ret = __salt__['boto_ec2.release_eip_address'](public_ip=None, allocation_id=r['result']['allocationId'], region=region, key=key, keyid=keyid, profile=profile) if not _ret: ret['comment'] = ' '.join([ret['comment'], 'Failed to release EIP allocated to the ENI.']) ret['result'] = False return ret else: ret['comment'] = ' '.join([ret['comment'], 'EIP released.']) ret['changes']['eip released'] = True return ret def snapshot_created(name, ami_name, instance_name, wait_until_available=True, wait_timeout_seconds=300, 
**kwargs): ''' Create a snapshot from the given instance .. versionadded:: 2016.3.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } if not __salt__['boto_ec2.create_image'](ami_name=ami_name, instance_name=instance_name, **kwargs): ret['comment'] = 'Failed to create new AMI {ami_name}'.format(ami_name=ami_name) ret['result'] = False return ret ret['comment'] = 'Created new AMI {ami_name}'.format(ami_name=ami_name) ret['changes']['new'] = {ami_name: ami_name} if not wait_until_available: return ret starttime = time() while True: images = __salt__['boto_ec2.find_images'](ami_name=ami_name, return_objs=True, **kwargs) if images and images[0].state == 'available': break if time() - starttime > wait_timeout_seconds: if images: ret['comment'] = 'AMI still in state {state} after timeout'.format(state=images[0].state) else: ret['comment'] = 'AMI with name {ami_name} not found after timeout.'.format(ami_name=ami_name) ret['result'] = False return ret sleep(5) return ret def instance_present(name, instance_name=None, instance_id=None, image_id=None, image_name=None, tags=None, key_name=None, security_groups=None, user_data=None, instance_type=None, placement=None, kernel_id=None, ramdisk_id=None, vpc_id=None, vpc_name=None, monitoring_enabled=None, subnet_id=None, subnet_name=None, private_ip_address=None, block_device_map=None, disable_api_termination=None, instance_initiated_shutdown_behavior=None, placement_group=None, client_token=None, security_group_ids=None, security_group_names=None, additional_info=None, tenancy=None, instance_profile_arn=None, instance_profile_name=None, ebs_optimized=None, network_interfaces=None, network_interface_name=None, network_interface_id=None, attributes=None, target_state=None, public_ip=None, allocation_id=None, allocate_eip=False, region=None, key=None, keyid=None, profile=None): ### TODO - implement 'target_state={running, stopped}' ''' Ensure an EC2 instance is running with the given attributes and state. 
name (string) - The name of the state definition. Recommended that this match the instance_name attribute (generally the FQDN of the instance). instance_name (string) - The name of the instance, generally its FQDN. Exclusive with 'instance_id'. instance_id (string) - The ID of the instance (if known). Exclusive with 'instance_name'. image_id (string) – The ID of the AMI image to run. image_name (string) – The name of the AMI image to run. tags (dict) - Tags to apply to the instance. key_name (string) – The name of the key pair with which to launch instances. security_groups (list of strings) – The names of the EC2 classic security groups with which to associate instances user_data (string) – The Base64-encoded MIME user data to be made available to the instance(s) in this reservation. instance_type (string) – The EC2 instance size/type. Note that only certain types are compatible with HVM based AMIs. placement (string) – The Availability Zone to launch the instance into. kernel_id (string) – The ID of the kernel with which to launch the instances. ramdisk_id (string) – The ID of the RAM disk with which to launch the instances. vpc_id (string) - The ID of a VPC to attach the instance to. vpc_name (string) - The name of a VPC to attach the instance to. monitoring_enabled (bool) – Enable detailed CloudWatch monitoring on the instance. subnet_id (string) – The ID of the subnet within which to launch the instances for VPC. subnet_name (string) – The name of the subnet within which to launch the instances for VPC. private_ip_address (string) – If you’re using VPC, you can optionally use this parameter to assign the instance a specific available IP address from the subnet (e.g., 10.0.0.25). block_device_map (boto.ec2.blockdevicemapping.BlockDeviceMapping) – A BlockDeviceMapping data structure describing the EBS volumes associated with the Image. disable_api_termination (bool) – If True, the instances will be locked and will not be able to be terminated via the API. 
instance_initiated_shutdown_behavior (string) – Specifies whether the instance stops or terminates on instance-initiated shutdown. Valid values are: - 'stop' - 'terminate' placement_group (string) – If specified, this is the name of the placement group in which the instance(s) will be launched. client_token (string) – Unique, case-sensitive identifier you provide to ensure idempotency of the request. Maximum 64 ASCII characters. security_group_ids (list of strings) – The IDs of the VPC security groups with which to associate instances. security_group_names (list of strings) – The names of the VPC security groups with which to associate instances. additional_info (string) – Specifies additional information to make available to the instance(s). tenancy (string) – The tenancy of the instance you want to launch. An instance with a tenancy of ‘dedicated’ runs on single-tenant hardware and can only be launched into a VPC. Valid values are:”default” or “dedicated”. NOTE: To use dedicated tenancy you MUST specify a VPC subnet-ID as well. instance_profile_arn (string) – The Amazon resource name (ARN) of the IAM Instance Profile (IIP) to associate with the instances. instance_profile_name (string) – The name of the IAM Instance Profile (IIP) to associate with the instances. ebs_optimized (bool) – Whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and a tuned configuration stack to provide optimal EBS I/O performance. This optimization isn’t available with all instance types. network_interfaces (boto.ec2.networkinterface.NetworkInterfaceCollection) – A NetworkInterfaceCollection data structure containing the ENI specifications for the instance. network_interface_name (string) - The name of Elastic Network Interface to attach .. versionadded:: 2016.11.0 network_interface_id (string) - The id of Elastic Network Interface to attach .. 
versionadded:: 2016.11.0 attributes (dict) - Instance attributes and value to be applied to the instance. Available options are: - instanceType - A valid instance type (m1.small) - kernel - Kernel ID (None) - ramdisk - Ramdisk ID (None) - userData - Base64 encoded String (None) - disableApiTermination - Boolean (true) - instanceInitiatedShutdownBehavior - stop|terminate - blockDeviceMapping - List of strings - ie: [‘/dev/sda=false’] - sourceDestCheck - Boolean (true) - groupSet - Set of Security Groups or IDs - ebsOptimized - Boolean (false) - sriovNetSupport - String - ie: ‘simple’ target_state (string) - The desired target state of the instance. Available options are: - running - stopped Note that this option is currently UNIMPLEMENTED. public_ip: (string) - The IP of a previously allocated EIP address, which will be attached to the instance. EC2 Classic instances ONLY - for VCP pass in an allocation_id instead. allocation_id: (string) - The ID of a previously allocated EIP address, which will be attached to the instance. VPC instances ONLY - for Classic pass in a public_ip instead. allocate_eip: (bool) - Allocate and attach an EIP on-the-fly for this instance. Note you'll want to releaase this address when terminating the instance, either manually or via the 'release_eip' flag to 'instance_absent'. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. .. 
versionadded:: 2016.3.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } _create = False running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') changed_attrs = {} if not salt.utils.data.exactly_one((image_id, image_name)): raise SaltInvocationError('Exactly one of image_id OR ' 'image_name must be provided.') if (public_ip or allocation_id or allocate_eip) and not salt.utils.data.exactly_one((public_ip, allocation_id, allocate_eip)): raise SaltInvocationError('At most one of public_ip, allocation_id OR ' 'allocate_eip may be provided.') if instance_id: exists = __salt__['boto_ec2.exists'](instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) if not exists: _create = True else: instances = __salt__['boto_ec2.find_instances'](name=instance_name if instance_name else name, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) if not instances: _create = True elif len(instances) > 1: log.debug('Multiple instances matching criteria found - cannot determine a singular instance-id') instance_id = None # No way to know, we'll just have to bail later.... 
else: instance_id = instances[0] if _create: if __opts__['test']: ret['comment'] = 'The instance {0} is set to be created.'.format(name) ret['result'] = None return ret if image_name: args = {'ami_name': image_name, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile} image_ids = __salt__['boto_ec2.find_images'](**args) if image_ids: image_id = image_ids[0] else: image_id = image_name r = __salt__['boto_ec2.run'](image_id, instance_name if instance_name else name, tags=tags, key_name=key_name, security_groups=security_groups, user_data=user_data, instance_type=instance_type, placement=placement, kernel_id=kernel_id, ramdisk_id=ramdisk_id, vpc_id=vpc_id, vpc_name=vpc_name, monitoring_enabled=monitoring_enabled, subnet_id=subnet_id, subnet_name=subnet_name, private_ip_address=private_ip_address, block_device_map=block_device_map, disable_api_termination=disable_api_termination, instance_initiated_shutdown_behavior=instance_initiated_shutdown_behavior, placement_group=placement_group, client_token=client_token, security_group_ids=security_group_ids, security_group_names=security_group_names, additional_info=additional_info, tenancy=tenancy, instance_profile_arn=instance_profile_arn, instance_profile_name=instance_profile_name, ebs_optimized=ebs_optimized, network_interfaces=network_interfaces, network_interface_name=network_interface_name, network_interface_id=network_interface_id, region=region, key=key, keyid=keyid, profile=profile) if not r or 'instance_id' not in r: ret['result'] = False ret['comment'] = 'Failed to create instance {0}.'.format(instance_name if instance_name else name) return ret instance_id = r['instance_id'] ret['changes'] = {'old': {}, 'new': {}} ret['changes']['old']['instance_id'] = None ret['changes']['new']['instance_id'] = instance_id # To avoid issues we only allocate new EIPs at instance creation. 
# This might miss situations where an instance is initially created # created without and one is added later, but the alternative is the # risk of EIPs allocated at every state run. if allocate_eip: if __opts__['test']: ret['comment'] = 'New EIP would be allocated.' ret['result'] = None return ret domain = 'vpc' if vpc_id or vpc_name else None r = __salt__['boto_ec2.allocate_eip_address']( domain=domain, region=region, key=key, keyid=keyid, profile=profile) if not r: ret['result'] = False ret['comment'] = 'Failed to allocate new EIP.' return ret allocation_id = r['allocation_id'] log.info("New EIP with address %s allocated.", r['public_ip']) else: log.info("EIP not requested.") if public_ip or allocation_id: # This can take a bit to show up, give it a chance to... tries = 10 secs = 3 for t in range(tries): r = __salt__['boto_ec2.get_eip_address_info']( addresses=public_ip, allocation_ids=allocation_id, region=region, key=key, keyid=keyid, profile=profile) if r: break else: log.info( 'Waiting up to %s secs for new EIP %s to become available', tries * secs, public_ip or allocation_id ) time.sleep(secs) if not r: ret['result'] = False ret['comment'] = 'Failed to lookup EIP {0}.'.format(public_ip or allocation_id) return ret ip = r[0]['public_ip'] if r[0].get('instance_id'): if r[0]['instance_id'] != instance_id: ret['result'] = False ret['comment'] = ('EIP {0} is already associated with instance ' '{1}.'.format(public_ip if public_ip else allocation_id, r[0]['instance_id'])) return ret else: if __opts__['test']: ret['comment'] = 'Instance {0} to be updated.'.format(name) ret['result'] = None return ret r = __salt__['boto_ec2.associate_eip_address']( instance_id=instance_id, public_ip=public_ip, allocation_id=allocation_id, region=region, key=key, keyid=keyid, profile=profile) if r: if 'new' not in ret['changes']: ret['changes']['new'] = {} ret['changes']['new']['public_ip'] = ip else: ret['result'] = False ret['comment'] = 'Failed to attach EIP to instance 
{0}.'.format( instance_name if instance_name else name) return ret if attributes: for k, v in six.iteritems(attributes): curr = __salt__['boto_ec2.get_attribute'](k, instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile) curr = {} if not isinstance(curr, dict) else curr if curr.get(k) == v: continue else: if __opts__['test']: changed_attrs[k] = 'The instance attribute {0} is set to be changed from \'{1}\' to \'{2}\'.'.format( k, curr.get(k), v) continue try: r = __salt__['boto_ec2.set_attribute'](attribute=k, attribute_value=v, instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile) except SaltInvocationError as e: ret['result'] = False ret['comment'] = 'Failed to set attribute {0} to {1} on instance {2}.'.format(k, v, instance_name) return ret ret['changes'] = ret['changes'] if ret['changes'] else {'old': {}, 'new': {}} ret['changes']['old'][k] = curr.get(k) ret['changes']['new'][k] = v if __opts__['test']: if changed_attrs: ret['changes']['new'] = changed_attrs ret['result'] = None else: ret['comment'] = 'Instance {0} is in the correct state'.format(instance_name if instance_name else name) ret['result'] = True if tags and instance_id is not None: tags = dict(tags) curr_tags = dict(__salt__['boto_ec2.get_all_tags'](filters={'resource-id': instance_id}, region=region, key=key, keyid=keyid, profile=profile).get(instance_id, {})) current = set(curr_tags.keys()) desired = set(tags.keys()) remove = list(current - desired) # Boto explicitly requires a list here and can't cope with a set... add = dict([(t, tags[t]) for t in desired - current]) replace = dict([(t, tags[t]) for t in tags if tags.get(t) != curr_tags.get(t)]) # Tag keys are unique despite the bizarre semantics uses which make it LOOK like they could be duplicative. 
add.update(replace) if add or remove: if __opts__['test']: ret['changes']['old'] = ret['changes']['old'] if 'old' in ret['changes'] else {} ret['changes']['new'] = ret['changes']['new'] if 'new' in ret['changes'] else {} ret['changes']['old']['tags'] = curr_tags ret['changes']['new']['tags'] = tags ret['comment'] += ' Tags would be updated on instance {0}.'.format(instance_name if instance_name else name) else: if remove: if not __salt__['boto_ec2.delete_tags'](resource_ids=instance_id, tags=remove, region=region, key=key, keyid=keyid, profile=profile): msg = "Error while deleting tags on instance {0}".format(instance_name if instance_name else name) log.error(msg) ret['comment'] += ' ' + msg ret['result'] = False return ret if add: if not __salt__['boto_ec2.create_tags'](resource_ids=instance_id, tags=add, region=region, key=key, keyid=keyid, profile=profile): msg = "Error while creating tags on instance {0}".format(instance_name if instance_name else name) log.error(msg) ret['comment'] += ' ' + msg ret['result'] = False return ret ret['changes']['old'] = ret['changes']['old'] if 'old' in ret['changes'] else {} ret['changes']['new'] = ret['changes']['new'] if 'new' in ret['changes'] else {} ret['changes']['old']['tags'] = curr_tags ret['changes']['new']['tags'] = tags return ret def instance_absent(name, instance_name=None, instance_id=None, release_eip=False, region=None, key=None, keyid=None, profile=None, filters=None): ''' Ensure an EC2 instance does not exist (is stopped and removed). .. versionchanged:: 2016.11.0 name (string) - The name of the state definition. instance_name (string) - The name of the instance. instance_id (string) - The ID of the instance. release_eip (bool) - Release any associated EIPs during termination. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. 
profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. filters (dict) - A dict of additional filters to use in matching the instance to delete. YAML example fragment: .. code-block:: yaml - filters: vpc-id: vpc-abcdef12 ''' ### TODO - Implement 'force' option?? Would automagically turn off ### 'disableApiTermination', as needed, before trying to delete. ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') if not instance_id: try: instance_id = __salt__['boto_ec2.get_id'](name=instance_name if instance_name else name, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states, filters=filters) except CommandExecutionError as e: ret['result'] = None ret['comment'] = ("Couldn't determine current status of instance " "{0}.".format(instance_name or name)) return ret instances = __salt__['boto_ec2.find_instances'](instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile, return_objs=True, filters=filters) if not instances: ret['result'] = True ret['comment'] = 'Instance {0} is already gone.'.format(instance_id) return ret instance = instances[0] ### Honor 'disableApiTermination' - if you want to override it, first use set_attribute() to turn it off no_can_do = __salt__['boto_ec2.get_attribute']('disableApiTermination', instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile) if no_can_do.get('disableApiTermination') is True: ret['result'] = False ret['comment'] = 'Termination of instance {0} via the API is disabled.'.format(instance_id) return ret if __opts__['test']: ret['comment'] = 'The instance {0} is set to be deleted.'.format(name) ret['result'] = None return ret r = __salt__['boto_ec2.terminate'](instance_id=instance_id, name=instance_name, region=region, key=key, keyid=keyid, profile=profile) if not r: ret['result'] = False ret['comment'] 
= 'Failed to terminate instance {0}.'.format(instance_id) return ret ret['changes']['old'] = {'instance_id': instance_id} ret['changes']['new'] = None if release_eip: ip = getattr(instance, 'ip_address', None) if ip: base_args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile} public_ip = None alloc_id = None assoc_id = None if getattr(instance, 'vpc_id', None): r = __salt__['boto_ec2.get_eip_address_info'](addresses=ip, **base_args) if r and 'allocation_id' in r[0]: alloc_id = r[0]['allocation_id'] assoc_id = r[0].get('association_id') else: # I /believe/ this situation is impossible but let's hedge our bets... ret['result'] = False ret['comment'] = "Can't determine AllocationId for address {0}.".format(ip) return ret else: public_ip = instance.ip_address if assoc_id: # Race here - sometimes the terminate above will already have dropped this if not __salt__['boto_ec2.disassociate_eip_address'](association_id=assoc_id, **base_args): log.warning("Failed to disassociate EIP %s.", ip) if __salt__['boto_ec2.release_eip_address'](allocation_id=alloc_id, public_ip=public_ip, **base_args): log.info("Released EIP address %s", public_ip or r[0]['public_ip']) ret['changes']['old']['public_ip'] = public_ip or r[0]['public_ip'] else: ret['result'] = False ret['comment'] = "Failed to release EIP {0}.".format(ip) return ret return ret def volume_absent(name, volume_name=None, volume_id=None, instance_name=None, instance_id=None, device=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 volume is detached and absent. .. versionadded:: 2016.11.0 name State definition name. volume_name Name tag associated with the volume. For safety, if this matches more than one volume, the state will refuse to apply. volume_id Resource ID of the volume. instance_name Only remove volume if it is attached to instance with this Name tag. Exclusive with 'instance_id'. Requires 'device'. instance_id Only remove volume if it is attached to this instance. 
Exclusive with 'instance_name'. Requires 'device'. device Match by device rather than ID. Requires one of 'instance_name' or 'instance_id'. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } filters = {} running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') if not salt.utils.data.exactly_one((volume_name, volume_id, instance_name, instance_id)): raise SaltInvocationError("Exactly one of 'volume_name', 'volume_id', " "'instance_name', or 'instance_id' must be provided.") if (instance_name or instance_id) and not device: raise SaltInvocationError("Parameter 'device' is required when either " "'instance_name' or 'instance_id' is specified.") if volume_id: filters.update({'volume-id': volume_id}) if volume_name: filters.update({'tag:Name': volume_name}) if instance_name: instance_id = __salt__['boto_ec2.get_id']( name=instance_name, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) if not instance_id: ret['comment'] = ('Instance with Name {0} not found. 
Assuming ' 'associated volumes gone.'.format(instance_name)) return ret if instance_id: filters.update({'attachment.instance-id': instance_id}) if device: filters.update({'attachment.device': device}) args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile} vols = __salt__['boto_ec2.get_all_volumes'](filters=filters, **args) if not vols: ret['comment'] = 'Volume matching criteria not found, assuming already absent' return ret if len(vols) > 1: msg = "More than one volume matched criteria, can't continue in state {0}".format(name) log.error(msg) ret['comment'] = msg ret['result'] = False return ret vol = vols[0] log.info('Matched Volume ID %s', vol) if __opts__['test']: ret['comment'] = 'The volume {0} is set to be deleted.'.format(vol) ret['result'] = None return ret if __salt__['boto_ec2.delete_volume'](volume_id=vol, force=True, **args): ret['comment'] = 'Volume {0} deleted.'.format(vol) ret['changes'] = {'old': {'volume_id': vol}, 'new': {'volume_id': None}} else: ret['comment'] = 'Error deleting volume {0}.'.format(vol) ret['result'] = False return ret def volumes_tagged(name, tag_maps, authoritative=False, region=None, key=None, keyid=None, profile=None): ''' Ensure EC2 volume(s) matching the given filters have the defined tags. .. versionadded:: 2016.11.0 name State definition name. tag_maps List of dicts of filters and tags, where 'filters' is a dict suitable for passing to the 'filters' argument of boto_ec2.get_all_volumes(), and 'tags' is a dict of tags to be set on volumes as matched by the given filters. The filter syntax is extended to permit passing either a list of volume_ids or an instance_name (with instance_name being the Name tag of the instance to which the desired volumes are mapped). Each mapping in the list is applied separately, so multiple sets of volumes can be all tagged differently with one call to this function. YAML example fragment: .. 
code-block:: yaml - filters: attachment.instance_id: i-abcdef12 tags: Name: dev-int-abcdef12.aws-foo.com - filters: attachment.device: /dev/sdf tags: ManagedSnapshots: true BillingGroup: bubba.hotep@aws-foo.com - filters: instance_name: prd-foo-01.aws-foo.com tags: Name: prd-foo-01.aws-foo.com BillingGroup: infra-team@aws-foo.com - filters: volume_ids: [ vol-12345689, vol-abcdef12 ] tags: BillingGroup: infra-team@aws-foo.com authoritative Should un-declared tags currently set on matched volumes be deleted? Boolean. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } args = {'tag_maps': tag_maps, 'authoritative': authoritative, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile} if __opts__['test']: args['dry_run'] = True r = __salt__['boto_ec2.set_volumes_tags'](**args) if r['success']: if r.get('changes'): ret['comment'] = 'Tags would be updated.' ret['changes'] = r['changes'] ret['result'] = None else: ret['comment'] = 'Error validating requested volume tags.' ret['result'] = False return ret r = __salt__['boto_ec2.set_volumes_tags'](**args) if r['success']: if r.get('changes'): ret['comment'] = 'Tags applied.' ret['changes'] = r['changes'] else: ret['comment'] = 'Error updating requested volume tags.' ret['result'] = False return ret def volume_present(name, volume_name=None, volume_id=None, instance_name=None, instance_id=None, device=None, size=None, snapshot_id=None, volume_type=None, iops=None, encrypted=False, kms_key_id=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 volume is present and attached. .. name State definition name. volume_name The Name tag value for the volume. If no volume with that matching name tag is found, a new volume will be created. 
If multiple volumes are matched, the state will fail. volume_id Resource ID of the volume. Exclusive with 'volume_name'. instance_name Attach volume to instance with this Name tag. Exclusive with 'instance_id'. instance_id Attach volume to instance with this ID. Exclusive with 'instance_name'. device The device on the instance through which the volume is exposed (e.g. /dev/sdh) size The size of the new volume, in GiB. If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. snapshot_id The snapshot ID from which the new Volume will be created. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. volume_type The type of the volume. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. Valid volume types for AWS can be found here: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html iops The provisioned IOPS you want to associate with this volume. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. encrypted Specifies whether the volume should be encrypted. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. kms_key_id If encrypted is True, this KMS Key ID may be specified to encrypt volume with this key. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. e.g.: arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} old_dict = {} new_dict = {} running_states = ('running', 'stopped') if not salt.utils.data.exactly_one((volume_name, volume_id)): raise SaltInvocationError("Exactly one of 'volume_name', 'volume_id', " " must be provided.") if not salt.utils.data.exactly_one((instance_name, instance_id)): raise SaltInvocationError("Exactly one of 'instance_name', or 'instance_id'" " must be provided.") if device is None: raise SaltInvocationError("Parameter 'device' is required.") args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile} if instance_name: instance_id = __salt__['boto_ec2.get_id']( name=instance_name, in_states=running_states, **args) if not instance_id: raise SaltInvocationError('Instance with Name {0} not found.'.format(instance_name)) instances = __salt__['boto_ec2.find_instances'](instance_id=instance_id, return_objs=True, **args) instance = instances[0] if volume_name: filters = {} filters.update({'tag:Name': volume_name}) vols = __salt__['boto_ec2.get_all_volumes'](filters=filters, **args) if len(vols) > 1: msg = "More than one volume matched volume name {0}, can't continue in state {1}".format(volume_name, name) raise SaltInvocationError(msg) if not vols: if __opts__['test']: ret['comment'] = ('The volume with name {0} is set to be created and attached' ' on {1}({2}).'.format(volume_name, instance_id, device)) ret['result'] = None return ret _rt = __salt__['boto_ec2.create_volume'](zone_name=instance.placement, size=size, snapshot_id=snapshot_id, volume_type=volume_type, iops=iops, encrypted=encrypted, kms_key_id=kms_key_id, wait_for_creation=True, **args) if 'result' in _rt: volume_id = _rt['result'] else: raise SaltInvocationError('Error creating volume with name {0}.'.format(volume_name)) _rt = __salt__['boto_ec2.set_volumes_tags'](tag_maps=[{ 'filters': {'volume_ids': [volume_id]}, 'tags': {'Name': volume_name} }], **args) if _rt['success'] is False: raise 
SaltInvocationError('Error updating requested volume ' '{0} with name {1}. {2}'.format(volume_id, volume_name, _rt['comment'])) old_dict['volume_id'] = None new_dict['volume_id'] = volume_id else: volume_id = vols[0] vols = __salt__['boto_ec2.get_all_volumes'](volume_ids=[volume_id], return_objs=True, **args) if not vols: raise SaltInvocationError('Volume {0} do not exist'.format(volume_id)) vol = vols[0] if vol.zone != instance.placement: raise SaltInvocationError(('Volume {0} in {1} cannot attach to instance' ' {2} in {3}.').format(volume_id, vol.zone, instance_id, instance.placement)) attach_data = vol.attach_data if attach_data is not None and attach_data.instance_id is not None: if instance_id == attach_data.instance_id and device == attach_data.device: ret['comment'] = 'The volume {0} is attached on {1}({2}).'.format(volume_id, instance_id, device) return ret else: if __opts__['test']: ret['comment'] = ('The volume {0} is set to be detached' ' from {1}({2} and attached on {3}({4}).').format(attach_data.instance_id, attach_data.devic, volume_id, instance_id, device) ret['result'] = None return ret if __salt__['boto_ec2.detach_volume'](volume_id=volume_id, wait_for_detachement=True, **args): ret['comment'] = 'Volume {0} is detached from {1}({2}).'.format(volume_id, attach_data.instance_id, attach_data.device) old_dict['instance_id'] = attach_data.instance_id old_dict['device'] = attach_data.device else: raise SaltInvocationError(('The volume {0} is already attached on instance {1}({2}).' 
' Failed to detach').format(volume_id, attach_data.instance_id, attach_data.device)) else: old_dict['instance_id'] = instance_id old_dict['device'] = None if __opts__['test']: ret['comment'] = 'The volume {0} is set to be attached on {1}({2}).'.format(volume_id, instance_id, device) ret['result'] = None return ret if __salt__['boto_ec2.attach_volume'](volume_id=volume_id, instance_id=instance_id, device=device, **args): ret['comment'] = ' '.join([ ret['comment'], 'Volume {0} is attached on {1}({2}).'.format(volume_id, instance_id, device)]) new_dict['instance_id'] = instance_id new_dict['device'] = device ret['changes'] = {'old': old_dict, 'new': new_dict} else: ret['comment'] = 'Error attaching volume {0} to instance {1}({2}).'.format(volume_id, instance_id, device) ret['result'] = False return ret def private_ips_present(name, network_interface_name=None, network_interface_id=None, private_ip_addresses=None, allow_reassignment=False, region=None, key=None, keyid=None, profile=None): ''' Ensure an ENI has secondary private ip addresses associated with it name (String) - State definition name network_interface_id (String) - The EC2 network interface id, example eni-123456789 private_ip_addresses (List or String) - The secondary private ip address(es) that should be present on the ENI. allow_reassignment (Boolean) - If true, will reassign a secondary private ip address associated with another ENI. If false, state will fail if the secondary private ip address is associated with another ENI. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' if not salt.utils.data.exactly_one((network_interface_name, network_interface_id)): raise SaltInvocationError("Exactly one of 'network_interface_name', " "'network_interface_id' must be provided") if not private_ip_addresses: raise SaltInvocationError("You must provide the private_ip_addresses to associate with the " "ENI") ret = { 'name': name, 'result': True, 'comment': '', 'changes': {'old': [], 'new': []} } get_eni_args = { 'name': network_interface_name, 'network_interface_id': network_interface_id, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args) # Check if there are any new secondary private ips to add to the eni if eni and eni.get('result', {}).get('private_ip_addresses'): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['old'].append(eni_pip['private_ip_address']) ips_to_add = [] for private_ip in private_ip_addresses: if private_ip not in ret['changes']['old']: ips_to_add.append(private_ip) if ips_to_add: if not __opts__['test']: # Assign secondary private ips to ENI assign_ips_args = { 'network_interface_id': network_interface_id, 'private_ip_addresses': ips_to_add, 'allow_reassignment': allow_reassignment, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } __salt__['boto_ec2.assign_private_ip_addresses'](**assign_ips_args) # Verify secondary private ips were properly assigned to ENI eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args) if eni and eni.get('result', {}).get('private_ip_addresses', None): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['new'].append(eni_pip['private_ip_address']) ips_not_added = [] for private_ip in private_ip_addresses: if private_ip not in ret['changes']['new']: ips_not_added.append(private_ip) # Display results if ips_not_added: ret['result'] = False ret['comment'] = ('ips on eni: {0}\n' 'attempted to add: {1}\n' 'could not add the following ips: {2}\n'.format( '\n\t- 
' + '\n\t- '.join(ret['changes']['new']), '\n\t- ' + '\n\t- '.join(ips_to_add), '\n\t- ' + '\n\t- '.join(ips_not_added))) else: ret['comment'] = "added ips: {0}".format( '\n\t- ' + '\n\t- '.join(ips_to_add)) # Verify there were changes if ret['changes']['old'] == ret['changes']['new']: ret['changes'] = {} else: # Testing mode, show that there were ips to add ret['comment'] = ('ips on eni: {0}\n' 'ips that would be added: {1}\n'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old']), '\n\t- ' + '\n\t- '.join(ips_to_add))) ret['changes'] = {} ret['result'] = None else: ret['comment'] = 'ips on eni: {0}'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old'])) # there were no changes since we did not attempt to remove ips ret['changes'] = {} return ret def private_ips_absent(name, network_interface_name=None, network_interface_id=None, private_ip_addresses=None, region=None, key=None, keyid=None, profile=None): ''' Ensure an ENI does not have secondary private ip addresses associated with it name (String) - State definition name network_interface_id (String) - The EC2 network interface id, example eni-123456789 private_ip_addresses (List or String) - The secondary private ip address(es) that should be absent on the ENI. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' if not salt.utils.data.exactly_one((network_interface_name, network_interface_id)): raise SaltInvocationError("Exactly one of 'network_interface_name', " "'network_interface_id' must be provided") if not private_ip_addresses: raise SaltInvocationError("You must provide the private_ip_addresses to unassociate with " "the ENI") if not isinstance(private_ip_addresses, list): private_ip_addresses = [private_ip_addresses] ret = { 'name': name, 'result': True, 'comment': '', 'changes': {'new': [], 'old': []} } get_eni_args = { 'name': network_interface_name, 'network_interface_id': network_interface_id, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args) # Check if there are any old private ips to remove from the eni primary_private_ip = None if eni and eni.get('result', {}).get('private_ip_addresses'): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['old'].append(eni_pip['private_ip_address']) if eni_pip['primary']: primary_private_ip = eni_pip['private_ip_address'] ips_to_remove = [] for private_ip in private_ip_addresses: if private_ip in ret['changes']['old']: ips_to_remove.append(private_ip) if private_ip == primary_private_ip: ret['result'] = False ret['comment'] = ('You cannot unassign the primary private ip address ({0}) on an ' 'eni\n' 'ips on eni: {1}\n' 'attempted to remove: {2}\n'.format( primary_private_ip, '\n\t- ' + '\n\t- '.join(ret['changes']['old']), '\n\t- ' + '\n\t- '.join(private_ip_addresses))) ret['changes'] = {} return ret if ips_to_remove: if not __opts__['test']: # Unassign secondary private ips to ENI assign_ips_args = { 'network_interface_id': network_interface_id, 'private_ip_addresses': ips_to_remove, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } __salt__['boto_ec2.unassign_private_ip_addresses'](**assign_ips_args) # Verify secondary private ips were properly unassigned from ENI eni = 
__salt__['boto_ec2.get_network_interface'](**get_eni_args) if eni and eni.get('result', {}).get('private_ip_addresses', None): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['new'].append(eni_pip['private_ip_address']) ips_not_removed = [] for private_ip in private_ip_addresses: if private_ip in ret['changes']['new']: ips_not_removed.append(private_ip) if ips_not_removed: ret['result'] = False ret['comment'] = ('ips on eni: {0}\n' 'attempted to remove: {1}\n' 'could not remove the following ips: {2}\n'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['new']), '\n\t- ' + '\n\t- '.join(ips_to_remove), '\n\t- ' + '\n\t- '.join(ips_not_removed))) else: ret['comment'] = "removed ips: {0}".format('\n\t- ' + '\n\t- '.join(ips_to_remove)) # Verify there were changes if ret['changes']['old'] == ret['changes']['new']: ret['changes'] = {} else: # Testing mode, show that there were ips to remove ret['comment'] = ('ips on eni: {0}\n' 'ips that would be removed: {1}\n'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old']), '\n\t- ' + '\n\t- '.join(ips_to_remove))) ret['changes'] = {} ret['result'] = None else: ret['comment'] = 'ips on network interface: {0}'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old'])) # there were no changes since we did not attempt to remove ips ret['changes'] = {} return ret
saltstack/salt
salt/states/boto_ec2.py
eni_present
python
def eni_present( name, subnet_id=None, subnet_name=None, private_ip_address=None, description=None, groups=None, source_dest_check=True, allocate_eip=None, arecords=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 ENI exists. .. versionadded:: 2016.3.0 name Name tag associated with the ENI. subnet_id The VPC subnet ID the ENI will exist within. subnet_name The VPC subnet name the ENI will exist within. private_ip_address The private ip address to use for this ENI. If this is not specified AWS will automatically assign a private IP address to the ENI. Must be specified at creation time; will be ignored afterward. description Description of the key. groups A list of security groups to apply to the ENI. source_dest_check Boolean specifying whether source/destination checking is enabled on the ENI. allocate_eip allocate and associate an EIP to the ENI. Could be 'standard' to allocate Elastic IP to EC2 region or 'vpc' to get it for a particular VPC .. versionchanged:: 2016.11.0 arecords A list of arecord dicts with attributes needed for the DNS add_record state. By default the boto_route53.add_record state will be used, which requires: name, zone, ttl, and identifier. See the boto_route53 state for information about these attributes. Other DNS modules can be called by specifying the provider keyword. By default, the private ENI IP address will be used, set 'public: True' in the arecord dict to use the ENI's public IP address .. versionadded:: 2016.3.0 region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' if not salt.utils.data.exactly_one((subnet_id, subnet_name)): raise SaltInvocationError('One (but not both) of subnet_id or ' 'subnet_name must be provided.') if not groups: raise SaltInvocationError('groups is a required argument.') if not isinstance(groups, list): raise SaltInvocationError('groups must be a list.') if not isinstance(source_dest_check, bool): raise SaltInvocationError('source_dest_check must be a bool.') ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} r = __salt__['boto_ec2.get_network_interface']( name=name, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in r: ret['result'] = False ret['comment'] = 'Error when attempting to find eni: {0}.'.format( r['error']['message'] ) return ret if not r['result']: if __opts__['test']: ret['comment'] = 'ENI is set to be created.' if allocate_eip: ret['comment'] = ' '.join([ret['comment'], 'An EIP is set to be allocated/assocaited to the ENI.']) if arecords: ret['comment'] = ' '.join([ret['comment'], 'A records are set to be created.']) ret['result'] = None return ret result_create = __salt__['boto_ec2.create_network_interface']( name, subnet_id=subnet_id, subnet_name=subnet_name, private_ip_address=private_ip_address, description=description, groups=groups, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_create: ret['result'] = False ret['comment'] = 'Failed to create ENI: {0}'.format( result_create['error']['message'] ) return ret r['result'] = result_create['result'] ret['comment'] = 'Created ENI {0}'.format(name) ret['changes']['id'] = r['result']['id'] else: _ret = _eni_attribute( r['result'], 'description', description, region, key, keyid, profile ) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = _ret['comment'] if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _eni_groups( r['result'], groups, region, key, keyid, profile ) ret['changes'] = 
dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret # Actions that need to occur whether creating or updating _ret = _eni_attribute( r['result'], 'source_dest_check', source_dest_check, region, key, keyid, profile ) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] return ret if allocate_eip: if 'allocationId' not in r['result']: if __opts__['test']: ret['comment'] = ' '.join([ret['comment'], 'An EIP is set to be allocated and assocaited to the ENI.']) else: domain = 'vpc' if allocate_eip == 'vpc' else None eip_alloc = __salt__['boto_ec2.allocate_eip_address'](domain=domain, region=region, key=key, keyid=keyid, profile=profile) if eip_alloc: _ret = __salt__['boto_ec2.associate_eip_address'](instance_id=None, instance_name=None, public_ip=None, allocation_id=eip_alloc['allocation_id'], network_interface_id=r['result']['id'], private_ip_address=None, allow_reassociation=False, region=region, key=key, keyid=keyid, profile=profile) if not _ret: _ret = __salt__['boto_ec2.release_eip_address'](public_ip=None, allocation_id=eip_alloc['allocation_id'], region=region, key=key, keyid=keyid, profile=profile) ret['result'] = False msg = 'Failed to assocaite the allocated EIP address with the ENI. The EIP {0}'.format('was successfully released.' if _ret else 'was NOT RELEASED.') ret['comment'] = ' '.join([ret['comment'], msg]) return ret else: ret['result'] = False ret['comment'] = ' '.join([ret['comment'], 'Failed to allocate an EIP address']) return ret else: ret['comment'] = ' '.join([ret['comment'], 'An EIP is already allocated/assocaited to the ENI']) if arecords: for arecord in arecords: if 'name' not in arecord: msg = 'The arecord must contain a "name" property.' 
raise SaltInvocationError(msg) log.debug('processing arecord %s', arecord) _ret = None dns_provider = 'boto_route53' arecord['record_type'] = 'A' public_ip_arecord = False if 'public' in arecord: public_ip_arecord = arecord.pop('public') if public_ip_arecord: if 'publicIp' in r['result']: arecord['value'] = r['result']['publicIp'] elif 'public_ip' in eip_alloc: arecord['value'] = eip_alloc['public_ip'] else: msg = 'Unable to add an A record for the public IP address, a public IP address does not seem to be allocated to this ENI.' raise CommandExecutionError(msg) else: arecord['value'] = r['result']['private_ip_address'] if 'provider' in arecord: dns_provider = arecord.pop('provider') if dns_provider == 'boto_route53': if 'profile' not in arecord: arecord['profile'] = profile if 'key' not in arecord: arecord['key'] = key if 'keyid' not in arecord: arecord['keyid'] = keyid if 'region' not in arecord: arecord['region'] = region _ret = __states__['.'.join([dns_provider, 'present'])](**arecord) log.debug('ret from dns_provider.present = %s', _ret) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret return ret
Ensure the EC2 ENI exists. .. versionadded:: 2016.3.0 name Name tag associated with the ENI. subnet_id The VPC subnet ID the ENI will exist within. subnet_name The VPC subnet name the ENI will exist within. private_ip_address The private ip address to use for this ENI. If this is not specified AWS will automatically assign a private IP address to the ENI. Must be specified at creation time; will be ignored afterward. description Description of the key. groups A list of security groups to apply to the ENI. source_dest_check Boolean specifying whether source/destination checking is enabled on the ENI. allocate_eip allocate and associate an EIP to the ENI. Could be 'standard' to allocate Elastic IP to EC2 region or 'vpc' to get it for a particular VPC .. versionchanged:: 2016.11.0 arecords A list of arecord dicts with attributes needed for the DNS add_record state. By default the boto_route53.add_record state will be used, which requires: name, zone, ttl, and identifier. See the boto_route53 state for information about these attributes. Other DNS modules can be called by specifying the provider keyword. By default, the private ENI IP address will be used, set 'public: True' in the arecord dict to use the ENI's public IP address .. versionadded:: 2016.3.0 region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_ec2.py#L168-L393
[ "def update(dest, upd, recursive_update=True, merge_lists=False):\n '''\n Recursive version of the default dict.update\n\n Merges upd recursively into dest\n\n If recursive_update=False, will use the classic dict.update, or fall back\n on a manual merge (helpful for non-dict types like FunctionWrapper)\n\n If merge_lists=True, will aggregate list object types instead of replace.\n The list in ``upd`` is added to the list in ``dest``, so the resulting list\n is ``dest[key] + upd[key]``. This behavior is only activated when\n recursive_update=True. By default merge_lists=False.\n\n .. versionchanged: 2016.11.6\n When merging lists, duplicate values are removed. Values already\n present in the ``dest`` list are not added from the ``upd`` list.\n '''\n if (not isinstance(dest, Mapping)) \\\n or (not isinstance(upd, Mapping)):\n raise TypeError('Cannot update using non-dict types in dictupdate.update()')\n updkeys = list(upd.keys())\n if not set(list(dest.keys())) & set(updkeys):\n recursive_update = False\n if recursive_update:\n for key in updkeys:\n val = upd[key]\n try:\n dest_subkey = dest.get(key, None)\n except AttributeError:\n dest_subkey = None\n if isinstance(dest_subkey, Mapping) \\\n and isinstance(val, Mapping):\n ret = update(dest_subkey, val, merge_lists=merge_lists)\n dest[key] = ret\n elif isinstance(dest_subkey, list) and isinstance(val, list):\n if merge_lists:\n merged = copy.deepcopy(dest_subkey)\n merged.extend([x for x in val if x not in merged])\n dest[key] = merged\n else:\n dest[key] = upd[key]\n else:\n dest[key] = upd[key]\n return dest\n try:\n for k in upd:\n dest[k] = upd[k]\n except AttributeError:\n # this mapping is not a dict\n for k in upd:\n dest[k] = upd[k]\n return dest\n", "def _eni_attribute(metadata, attr, value, region, key, keyid, profile):\n ret = {'result': True, 'comment': '', 'changes': {}}\n if metadata[attr] == value:\n return ret\n if __opts__['test']:\n ret['comment'] = 'ENI set to have {0} updated.'.format(attr)\n 
ret['result'] = None\n return ret\n result_update = __salt__['boto_ec2.modify_network_interface_attribute'](\n network_interface_id=metadata['id'], attr=attr,\n value=value, region=region, key=key, keyid=keyid, profile=profile\n )\n if 'error' in result_update:\n msg = 'Failed to update ENI {0}: {1}.'\n ret['result'] = False\n ret['comment'] = msg.format(attr, result_update['error']['message'])\n else:\n ret['comment'] = 'Updated ENI {0}.'.format(attr)\n ret['changes'][attr] = {\n 'old': metadata[attr],\n 'new': value\n }\n return ret\n", "def _eni_groups(metadata, groups, region, key, keyid, profile):\n ret = {'result': True, 'comment': '', 'changes': {}}\n group_ids = [g['id'] for g in metadata['groups']]\n group_ids.sort()\n _groups = __salt__['boto_secgroup.convert_to_group_ids'](\n groups, vpc_id=metadata['vpc_id'], region=region, key=key, keyid=keyid,\n profile=profile\n )\n if not _groups:\n ret['comment'] = 'Could not find secgroup ids for provided groups.'\n ret['result'] = False\n _groups.sort()\n if group_ids == _groups:\n return ret\n if __opts__['test']:\n ret['comment'] = 'ENI set to have groups updated.'\n ret['result'] = None\n return ret\n result_update = __salt__['boto_ec2.modify_network_interface_attribute'](\n network_interface_id=metadata['id'], attr='groups',\n value=_groups, region=region, key=key, keyid=keyid, profile=profile\n )\n if 'error' in result_update:\n msg = 'Failed to update ENI groups: {1}.'\n ret['result'] = False\n ret['comment'] = msg.format(result_update['error']['message'])\n else:\n ret['comment'] = 'Updated ENI groups.'\n ret['changes']['groups'] = {\n 'old': group_ids,\n 'new': _groups\n }\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Manage EC2 .. versionadded:: 2015.8.0 This module provides an interface to the Elastic Compute Cloud (EC2) service from AWS. The below code creates a key pair: .. code-block:: yaml create-key-pair: boto_ec2.key_present: - name: mykeypair - save_private: /root/ - region: eu-west-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs .. code-block:: yaml import-key-pair: boto_ec2.key_present: - name: mykeypair - upload_public: 'ssh-rsa AAAA' - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs You can also use salt:// in order to define the public key. .. code-block:: yaml import-key-pair: boto_ec2.key_present: - name: mykeypair - upload_public: salt://mybase/public_key.pub - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs The below code deletes a key pair: .. code-block:: yaml delete-key-pair: boto_ec2.key_absent: - name: mykeypair - region: eu-west-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging from time import time, sleep # Import salt libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin import salt.utils.data import salt.utils.dictupdate as dictupdate from salt.exceptions import SaltInvocationError, CommandExecutionError log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. ''' if 'boto_ec2.get_key' in __salt__: return 'boto_ec2' else: return False def key_present(name, save_private=None, upload_public=None, region=None, key=None, keyid=None, profile=None): ''' Ensure key pair is present. 
''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } exists = __salt__['boto_ec2.get_key'](name, region, key, keyid, profile) log.debug('exists is %s', exists) if upload_public is not None and 'salt://' in upload_public: try: upload_public = __salt__['cp.get_file_str'](upload_public) except IOError as e: log.debug(e) ret['comment'] = 'File {0} not found.'.format(upload_public) ret['result'] = False return ret if not exists: if __opts__['test']: ret['comment'] = 'The key {0} is set to be created.'.format(name) ret['result'] = None return ret if save_private and not upload_public: created = __salt__['boto_ec2.create_key']( name, save_private, region, key, keyid, profile ) if created: ret['result'] = True ret['comment'] = 'The key {0} is created.'.format(name) ret['changes']['new'] = created else: ret['result'] = False ret['comment'] = 'Could not create key {0} '.format(name) elif not save_private and upload_public: imported = __salt__['boto_ec2.import_key'](name, upload_public, region, key, keyid, profile) if imported: ret['result'] = True ret['comment'] = 'The key {0} is created.'.format(name) ret['changes']['old'] = None ret['changes']['new'] = imported else: ret['result'] = False ret['comment'] = 'Could not create key {0} '.format(name) else: ret['result'] = False ret['comment'] = 'You can either upload or download a private key ' else: ret['result'] = True ret['comment'] = 'The key name {0} already exists'.format(name) return ret def key_absent(name, region=None, key=None, keyid=None, profile=None): ''' Deletes a key pair ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } exists = __salt__['boto_ec2.get_key'](name, region, key, keyid, profile) if exists: if __opts__['test']: ret['comment'] = 'The key {0} is set to be deleted.'.format(name) ret['result'] = None return ret deleted = __salt__['boto_ec2.delete_key'](name, region, key, keyid, profile) log.debug('exists is %s', deleted) if deleted: ret['result'] = True 
ret['comment'] = 'The key {0} is deleted.'.format(name) ret['changes']['old'] = name else: ret['result'] = False ret['comment'] = 'Could not delete key {0} '.format(name) else: ret['result'] = True ret['comment'] = 'The key name {0} does not exist'.format(name) return ret def _eni_attribute(metadata, attr, value, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} if metadata[attr] == value: return ret if __opts__['test']: ret['comment'] = 'ENI set to have {0} updated.'.format(attr) ret['result'] = None return ret result_update = __salt__['boto_ec2.modify_network_interface_attribute']( network_interface_id=metadata['id'], attr=attr, value=value, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_update: msg = 'Failed to update ENI {0}: {1}.' ret['result'] = False ret['comment'] = msg.format(attr, result_update['error']['message']) else: ret['comment'] = 'Updated ENI {0}.'.format(attr) ret['changes'][attr] = { 'old': metadata[attr], 'new': value } return ret def _eni_groups(metadata, groups, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} group_ids = [g['id'] for g in metadata['groups']] group_ids.sort() _groups = __salt__['boto_secgroup.convert_to_group_ids']( groups, vpc_id=metadata['vpc_id'], region=region, key=key, keyid=keyid, profile=profile ) if not _groups: ret['comment'] = 'Could not find secgroup ids for provided groups.' ret['result'] = False _groups.sort() if group_ids == _groups: return ret if __opts__['test']: ret['comment'] = 'ENI set to have groups updated.' ret['result'] = None return ret result_update = __salt__['boto_ec2.modify_network_interface_attribute']( network_interface_id=metadata['id'], attr='groups', value=_groups, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_update: msg = 'Failed to update ENI groups: {1}.' 
ret['result'] = False ret['comment'] = msg.format(result_update['error']['message']) else: ret['comment'] = 'Updated ENI groups.' ret['changes']['groups'] = { 'old': group_ids, 'new': _groups } return ret def eni_absent( name, release_eip=False, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 ENI is absent. .. versionadded:: 2016.3.0 name Name tag associated with the ENI. release_eip True/False - release any EIP associated with the ENI region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} r = __salt__['boto_ec2.get_network_interface']( name=name, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in r: ret['result'] = False ret['comment'] = 'Error when attempting to find eni: {0}.'.format( r['error']['message'] ) return ret if not r['result']: if __opts__['test']: ret['comment'] = 'ENI is set to be deleted.' ret['result'] = None return ret else: if __opts__['test']: ret['comment'] = 'ENI is set to be deleted.' 
if release_eip and 'allocationId' in r['result']: ret['comment'] = ' '.join([ret['comment'], 'Allocated/associated EIP is set to be released']) ret['result'] = None return ret if 'id' in r['result']['attachment']: result_detach = __salt__['boto_ec2.detach_network_interface']( name=name, force=True, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_detach: ret['result'] = False ret['comment'] = 'Failed to detach ENI: {0}'.format( result_detach['error']['message'] ) return ret # TODO: Ensure the detach occurs before continuing result_delete = __salt__['boto_ec2.delete_network_interface']( name=name, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_delete: ret['result'] = False ret['comment'] = 'Failed to delete ENI: {0}'.format( result_delete['error']['message'] ) return ret ret['comment'] = 'Deleted ENI {0}'.format(name) ret['changes']['id'] = None if release_eip and 'allocationId' in r['result']: _ret = __salt__['boto_ec2.release_eip_address'](public_ip=None, allocation_id=r['result']['allocationId'], region=region, key=key, keyid=keyid, profile=profile) if not _ret: ret['comment'] = ' '.join([ret['comment'], 'Failed to release EIP allocated to the ENI.']) ret['result'] = False return ret else: ret['comment'] = ' '.join([ret['comment'], 'EIP released.']) ret['changes']['eip released'] = True return ret def snapshot_created(name, ami_name, instance_name, wait_until_available=True, wait_timeout_seconds=300, **kwargs): ''' Create a snapshot from the given instance .. 
versionadded:: 2016.3.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } if not __salt__['boto_ec2.create_image'](ami_name=ami_name, instance_name=instance_name, **kwargs): ret['comment'] = 'Failed to create new AMI {ami_name}'.format(ami_name=ami_name) ret['result'] = False return ret ret['comment'] = 'Created new AMI {ami_name}'.format(ami_name=ami_name) ret['changes']['new'] = {ami_name: ami_name} if not wait_until_available: return ret starttime = time() while True: images = __salt__['boto_ec2.find_images'](ami_name=ami_name, return_objs=True, **kwargs) if images and images[0].state == 'available': break if time() - starttime > wait_timeout_seconds: if images: ret['comment'] = 'AMI still in state {state} after timeout'.format(state=images[0].state) else: ret['comment'] = 'AMI with name {ami_name} not found after timeout.'.format(ami_name=ami_name) ret['result'] = False return ret sleep(5) return ret def instance_present(name, instance_name=None, instance_id=None, image_id=None, image_name=None, tags=None, key_name=None, security_groups=None, user_data=None, instance_type=None, placement=None, kernel_id=None, ramdisk_id=None, vpc_id=None, vpc_name=None, monitoring_enabled=None, subnet_id=None, subnet_name=None, private_ip_address=None, block_device_map=None, disable_api_termination=None, instance_initiated_shutdown_behavior=None, placement_group=None, client_token=None, security_group_ids=None, security_group_names=None, additional_info=None, tenancy=None, instance_profile_arn=None, instance_profile_name=None, ebs_optimized=None, network_interfaces=None, network_interface_name=None, network_interface_id=None, attributes=None, target_state=None, public_ip=None, allocation_id=None, allocate_eip=False, region=None, key=None, keyid=None, profile=None): ### TODO - implement 'target_state={running, stopped}' ''' Ensure an EC2 instance is running with the given attributes and state. name (string) - The name of the state definition. 
Recommended that this match the instance_name attribute (generally the FQDN of the instance). instance_name (string) - The name of the instance, generally its FQDN. Exclusive with 'instance_id'. instance_id (string) - The ID of the instance (if known). Exclusive with 'instance_name'. image_id (string) – The ID of the AMI image to run. image_name (string) – The name of the AMI image to run. tags (dict) - Tags to apply to the instance. key_name (string) – The name of the key pair with which to launch instances. security_groups (list of strings) – The names of the EC2 classic security groups with which to associate instances user_data (string) – The Base64-encoded MIME user data to be made available to the instance(s) in this reservation. instance_type (string) – The EC2 instance size/type. Note that only certain types are compatible with HVM based AMIs. placement (string) – The Availability Zone to launch the instance into. kernel_id (string) – The ID of the kernel with which to launch the instances. ramdisk_id (string) – The ID of the RAM disk with which to launch the instances. vpc_id (string) - The ID of a VPC to attach the instance to. vpc_name (string) - The name of a VPC to attach the instance to. monitoring_enabled (bool) – Enable detailed CloudWatch monitoring on the instance. subnet_id (string) – The ID of the subnet within which to launch the instances for VPC. subnet_name (string) – The name of the subnet within which to launch the instances for VPC. private_ip_address (string) – If you’re using VPC, you can optionally use this parameter to assign the instance a specific available IP address from the subnet (e.g., 10.0.0.25). block_device_map (boto.ec2.blockdevicemapping.BlockDeviceMapping) – A BlockDeviceMapping data structure describing the EBS volumes associated with the Image. disable_api_termination (bool) – If True, the instances will be locked and will not be able to be terminated via the API. 
instance_initiated_shutdown_behavior (string) – Specifies whether the instance stops or terminates on instance-initiated shutdown. Valid values are: - 'stop' - 'terminate' placement_group (string) – If specified, this is the name of the placement group in which the instance(s) will be launched. client_token (string) – Unique, case-sensitive identifier you provide to ensure idempotency of the request. Maximum 64 ASCII characters. security_group_ids (list of strings) – The IDs of the VPC security groups with which to associate instances. security_group_names (list of strings) – The names of the VPC security groups with which to associate instances. additional_info (string) – Specifies additional information to make available to the instance(s). tenancy (string) – The tenancy of the instance you want to launch. An instance with a tenancy of ‘dedicated’ runs on single-tenant hardware and can only be launched into a VPC. Valid values are:”default” or “dedicated”. NOTE: To use dedicated tenancy you MUST specify a VPC subnet-ID as well. instance_profile_arn (string) – The Amazon resource name (ARN) of the IAM Instance Profile (IIP) to associate with the instances. instance_profile_name (string) – The name of the IAM Instance Profile (IIP) to associate with the instances. ebs_optimized (bool) – Whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and a tuned configuration stack to provide optimal EBS I/O performance. This optimization isn’t available with all instance types. network_interfaces (boto.ec2.networkinterface.NetworkInterfaceCollection) – A NetworkInterfaceCollection data structure containing the ENI specifications for the instance. network_interface_name (string) - The name of Elastic Network Interface to attach .. versionadded:: 2016.11.0 network_interface_id (string) - The id of Elastic Network Interface to attach .. 
versionadded:: 2016.11.0 attributes (dict) - Instance attributes and value to be applied to the instance. Available options are: - instanceType - A valid instance type (m1.small) - kernel - Kernel ID (None) - ramdisk - Ramdisk ID (None) - userData - Base64 encoded String (None) - disableApiTermination - Boolean (true) - instanceInitiatedShutdownBehavior - stop|terminate - blockDeviceMapping - List of strings - ie: [‘/dev/sda=false’] - sourceDestCheck - Boolean (true) - groupSet - Set of Security Groups or IDs - ebsOptimized - Boolean (false) - sriovNetSupport - String - ie: ‘simple’ target_state (string) - The desired target state of the instance. Available options are: - running - stopped Note that this option is currently UNIMPLEMENTED. public_ip: (string) - The IP of a previously allocated EIP address, which will be attached to the instance. EC2 Classic instances ONLY - for VCP pass in an allocation_id instead. allocation_id: (string) - The ID of a previously allocated EIP address, which will be attached to the instance. VPC instances ONLY - for Classic pass in a public_ip instead. allocate_eip: (bool) - Allocate and attach an EIP on-the-fly for this instance. Note you'll want to releaase this address when terminating the instance, either manually or via the 'release_eip' flag to 'instance_absent'. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. .. 
versionadded:: 2016.3.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } _create = False running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') changed_attrs = {} if not salt.utils.data.exactly_one((image_id, image_name)): raise SaltInvocationError('Exactly one of image_id OR ' 'image_name must be provided.') if (public_ip or allocation_id or allocate_eip) and not salt.utils.data.exactly_one((public_ip, allocation_id, allocate_eip)): raise SaltInvocationError('At most one of public_ip, allocation_id OR ' 'allocate_eip may be provided.') if instance_id: exists = __salt__['boto_ec2.exists'](instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) if not exists: _create = True else: instances = __salt__['boto_ec2.find_instances'](name=instance_name if instance_name else name, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) if not instances: _create = True elif len(instances) > 1: log.debug('Multiple instances matching criteria found - cannot determine a singular instance-id') instance_id = None # No way to know, we'll just have to bail later.... 
else: instance_id = instances[0] if _create: if __opts__['test']: ret['comment'] = 'The instance {0} is set to be created.'.format(name) ret['result'] = None return ret if image_name: args = {'ami_name': image_name, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile} image_ids = __salt__['boto_ec2.find_images'](**args) if image_ids: image_id = image_ids[0] else: image_id = image_name r = __salt__['boto_ec2.run'](image_id, instance_name if instance_name else name, tags=tags, key_name=key_name, security_groups=security_groups, user_data=user_data, instance_type=instance_type, placement=placement, kernel_id=kernel_id, ramdisk_id=ramdisk_id, vpc_id=vpc_id, vpc_name=vpc_name, monitoring_enabled=monitoring_enabled, subnet_id=subnet_id, subnet_name=subnet_name, private_ip_address=private_ip_address, block_device_map=block_device_map, disable_api_termination=disable_api_termination, instance_initiated_shutdown_behavior=instance_initiated_shutdown_behavior, placement_group=placement_group, client_token=client_token, security_group_ids=security_group_ids, security_group_names=security_group_names, additional_info=additional_info, tenancy=tenancy, instance_profile_arn=instance_profile_arn, instance_profile_name=instance_profile_name, ebs_optimized=ebs_optimized, network_interfaces=network_interfaces, network_interface_name=network_interface_name, network_interface_id=network_interface_id, region=region, key=key, keyid=keyid, profile=profile) if not r or 'instance_id' not in r: ret['result'] = False ret['comment'] = 'Failed to create instance {0}.'.format(instance_name if instance_name else name) return ret instance_id = r['instance_id'] ret['changes'] = {'old': {}, 'new': {}} ret['changes']['old']['instance_id'] = None ret['changes']['new']['instance_id'] = instance_id # To avoid issues we only allocate new EIPs at instance creation. 
# This might miss situations where an instance is initially created # created without and one is added later, but the alternative is the # risk of EIPs allocated at every state run. if allocate_eip: if __opts__['test']: ret['comment'] = 'New EIP would be allocated.' ret['result'] = None return ret domain = 'vpc' if vpc_id or vpc_name else None r = __salt__['boto_ec2.allocate_eip_address']( domain=domain, region=region, key=key, keyid=keyid, profile=profile) if not r: ret['result'] = False ret['comment'] = 'Failed to allocate new EIP.' return ret allocation_id = r['allocation_id'] log.info("New EIP with address %s allocated.", r['public_ip']) else: log.info("EIP not requested.") if public_ip or allocation_id: # This can take a bit to show up, give it a chance to... tries = 10 secs = 3 for t in range(tries): r = __salt__['boto_ec2.get_eip_address_info']( addresses=public_ip, allocation_ids=allocation_id, region=region, key=key, keyid=keyid, profile=profile) if r: break else: log.info( 'Waiting up to %s secs for new EIP %s to become available', tries * secs, public_ip or allocation_id ) time.sleep(secs) if not r: ret['result'] = False ret['comment'] = 'Failed to lookup EIP {0}.'.format(public_ip or allocation_id) return ret ip = r[0]['public_ip'] if r[0].get('instance_id'): if r[0]['instance_id'] != instance_id: ret['result'] = False ret['comment'] = ('EIP {0} is already associated with instance ' '{1}.'.format(public_ip if public_ip else allocation_id, r[0]['instance_id'])) return ret else: if __opts__['test']: ret['comment'] = 'Instance {0} to be updated.'.format(name) ret['result'] = None return ret r = __salt__['boto_ec2.associate_eip_address']( instance_id=instance_id, public_ip=public_ip, allocation_id=allocation_id, region=region, key=key, keyid=keyid, profile=profile) if r: if 'new' not in ret['changes']: ret['changes']['new'] = {} ret['changes']['new']['public_ip'] = ip else: ret['result'] = False ret['comment'] = 'Failed to attach EIP to instance 
{0}.'.format( instance_name if instance_name else name) return ret if attributes: for k, v in six.iteritems(attributes): curr = __salt__['boto_ec2.get_attribute'](k, instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile) curr = {} if not isinstance(curr, dict) else curr if curr.get(k) == v: continue else: if __opts__['test']: changed_attrs[k] = 'The instance attribute {0} is set to be changed from \'{1}\' to \'{2}\'.'.format( k, curr.get(k), v) continue try: r = __salt__['boto_ec2.set_attribute'](attribute=k, attribute_value=v, instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile) except SaltInvocationError as e: ret['result'] = False ret['comment'] = 'Failed to set attribute {0} to {1} on instance {2}.'.format(k, v, instance_name) return ret ret['changes'] = ret['changes'] if ret['changes'] else {'old': {}, 'new': {}} ret['changes']['old'][k] = curr.get(k) ret['changes']['new'][k] = v if __opts__['test']: if changed_attrs: ret['changes']['new'] = changed_attrs ret['result'] = None else: ret['comment'] = 'Instance {0} is in the correct state'.format(instance_name if instance_name else name) ret['result'] = True if tags and instance_id is not None: tags = dict(tags) curr_tags = dict(__salt__['boto_ec2.get_all_tags'](filters={'resource-id': instance_id}, region=region, key=key, keyid=keyid, profile=profile).get(instance_id, {})) current = set(curr_tags.keys()) desired = set(tags.keys()) remove = list(current - desired) # Boto explicitly requires a list here and can't cope with a set... add = dict([(t, tags[t]) for t in desired - current]) replace = dict([(t, tags[t]) for t in tags if tags.get(t) != curr_tags.get(t)]) # Tag keys are unique despite the bizarre semantics uses which make it LOOK like they could be duplicative. 
def instance_absent(name, instance_name=None, instance_id=None,
                    release_eip=False, region=None, key=None, keyid=None,
                    profile=None, filters=None):
    '''
    Ensure an EC2 instance does not exist (is stopped and removed).

    .. versionchanged:: 2016.11.0

    name
        (string) - The name of the state definition.
    instance_name
        (string) - The name of the instance.
    instance_id
        (string) - The ID of the instance.
    release_eip
        (bool)   - Release any associated EIPs during termination.
    region
        (string) - Region to connect to.
    key
        (string) - Secret key to be used.
    keyid
        (string) - Access key to be used.
    profile
        (variable) - A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    filters
        (dict) - A dict of additional filters to use in matching the instance to
        delete.

    YAML example fragment:

    .. code-block:: yaml

        - filters:
            vpc-id: vpc-abcdef12
    '''
    ### TODO - Implement 'force' option??  Would automagically turn off
    ###        'disableApiTermination', as needed, before trying to delete.
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped')

    # Resolve the instance ID from the Name tag when it was not given directly.
    if not instance_id:
        try:
            instance_id = __salt__['boto_ec2.get_id'](
                    name=instance_name if instance_name else name,
                    region=region, key=key, keyid=keyid, profile=profile,
                    in_states=running_states, filters=filters)
        except CommandExecutionError:
            ret['result'] = None
            ret['comment'] = ("Couldn't determine current status of instance "
                              "{0}.".format(instance_name or name))
            return ret

    found = __salt__['boto_ec2.find_instances'](
            instance_id=instance_id, region=region, key=key, keyid=keyid,
            profile=profile, return_objs=True, filters=filters)
    if not found:
        ret['result'] = True
        ret['comment'] = 'Instance {0} is already gone.'.format(instance_id)
        return ret
    instance = found[0]

    ### Honor 'disableApiTermination' - if you want to override it, first use
    ### set_attribute() to turn it off
    termination_guard = __salt__['boto_ec2.get_attribute'](
            'disableApiTermination', instance_id=instance_id, region=region,
            key=key, keyid=keyid, profile=profile)
    if termination_guard.get('disableApiTermination') is True:
        ret['result'] = False
        ret['comment'] = 'Termination of instance {0} via the API is disabled.'.format(instance_id)
        return ret

    if __opts__['test']:
        ret['comment'] = 'The instance {0} is set to be deleted.'.format(name)
        ret['result'] = None
        return ret

    if not __salt__['boto_ec2.terminate'](instance_id=instance_id,
                                          name=instance_name, region=region,
                                          key=key, keyid=keyid,
                                          profile=profile):
        ret['result'] = False
        ret['comment'] = 'Failed to terminate instance {0}.'.format(instance_id)
        return ret

    ret['changes']['old'] = {'instance_id': instance_id}
    ret['changes']['new'] = None

    if release_eip:
        ip = getattr(instance, 'ip_address', None)
        if ip:
            base_args = {'region': region, 'key': key, 'keyid': keyid,
                         'profile': profile}
            public_ip = None
            alloc_id = None
            assoc_id = None
            eip_info = None
            if getattr(instance, 'vpc_id', None):
                eip_info = __salt__['boto_ec2.get_eip_address_info'](
                        addresses=ip, **base_args)
                if eip_info and 'allocation_id' in eip_info[0]:
                    alloc_id = eip_info[0]['allocation_id']
                    assoc_id = eip_info[0].get('association_id')
                else:
                    # I /believe/ this situation is impossible but let's hedge our bets...
                    ret['result'] = False
                    ret['comment'] = "Can't determine AllocationId for address {0}.".format(ip)
                    return ret
            else:
                public_ip = instance.ip_address

            if assoc_id:
                # Race here - sometimes the terminate above will already have dropped this
                if not __salt__['boto_ec2.disassociate_eip_address'](
                        association_id=assoc_id, **base_args):
                    log.warning("Failed to disassociate EIP %s.", ip)

            if __salt__['boto_ec2.release_eip_address'](allocation_id=alloc_id,
                                                        public_ip=public_ip,
                                                        **base_args):
                # In the VPC path public_ip is None, so fall back to the looked-up address.
                released = public_ip or eip_info[0]['public_ip']
                log.info("Released EIP address %s", released)
                ret['changes']['old']['public_ip'] = released
            else:
                ret['result'] = False
                ret['comment'] = "Failed to release EIP {0}.".format(ip)
                return ret

    return ret


def volume_absent(name, volume_name=None, volume_id=None, instance_name=None,
                  instance_id=None, device=None, region=None, key=None,
                  keyid=None, profile=None):
    '''
    Ensure the EC2 volume is detached and absent.

    .. versionadded:: 2016.11.0

    name
        State definition name.
    volume_name
        Name tag associated with the volume.  For safety, if this matches more
        than one volume, the state will refuse to apply.
    volume_id
        Resource ID of the volume.
    instance_name
        Only remove volume if it is attached to instance with this Name tag.
        Exclusive with 'instance_id'.  Requires 'device'.
    instance_id
        Only remove volume if it is attached to this instance.
        Exclusive with 'instance_name'.  Requires 'device'.
    device
        Match by device rather than ID.  Requires one of 'instance_name' or
        'instance_id'.
    region
        Region to connect to.
    key
        Secret key to be used.
    keyid
        Access key to be used.
    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped')

    if not salt.utils.data.exactly_one((volume_name, volume_id, instance_name, instance_id)):
        raise SaltInvocationError("Exactly one of 'volume_name', 'volume_id', "
                                  "'instance_name', or 'instance_id' must be provided.")
    if (instance_name or instance_id) and not device:
        raise SaltInvocationError("Parameter 'device' is required when either "
                                  "'instance_name' or 'instance_id' is specified.")

    # Build up the EC2 describe-volumes filter set from whichever selectors
    # the caller supplied.
    filters = {}
    if volume_id:
        filters['volume-id'] = volume_id
    if volume_name:
        filters['tag:Name'] = volume_name
    if instance_name:
        instance_id = __salt__['boto_ec2.get_id'](
                name=instance_name, region=region, key=key, keyid=keyid,
                profile=profile, in_states=running_states)
        if not instance_id:
            ret['comment'] = ('Instance with Name {0} not found. Assuming '
                              'associated volumes gone.'.format(instance_name))
            return ret
    if instance_id:
        filters['attachment.instance-id'] = instance_id
    if device:
        filters['attachment.device'] = device

    args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile}
    vols = __salt__['boto_ec2.get_all_volumes'](filters=filters, **args)
    if not vols:
        ret['comment'] = 'Volume matching criteria not found, assuming already absent'
        return ret
    if len(vols) > 1:
        # Refuse to guess when the selectors are ambiguous.
        msg = "More than one volume matched criteria, can't continue in state {0}".format(name)
        log.error(msg)
        ret['comment'] = msg
        ret['result'] = False
        return ret

    vol = vols[0]
    log.info('Matched Volume ID %s', vol)

    if __opts__['test']:
        ret['comment'] = 'The volume {0} is set to be deleted.'.format(vol)
        ret['result'] = None
        return ret

    if __salt__['boto_ec2.delete_volume'](volume_id=vol, force=True, **args):
        ret['comment'] = 'Volume {0} deleted.'.format(vol)
        ret['changes'] = {'old': {'volume_id': vol}, 'new': {'volume_id': None}}
    else:
        ret['comment'] = 'Error deleting volume {0}.'.format(vol)
        ret['result'] = False
    return ret


def volumes_tagged(name, tag_maps, authoritative=False, region=None, key=None,
                   keyid=None, profile=None):
    '''
    Ensure EC2 volume(s) matching the given filters have the defined tags.

    .. versionadded:: 2016.11.0

    name
        State definition name.
    tag_maps
        List of dicts of filters and tags, where 'filters' is a dict suitable
        for passing to the 'filters' argument of boto_ec2.get_all_volumes(),
        and 'tags' is a dict of tags to be set on volumes as matched by the
        given filters.  The filter syntax is extended to permit passing either
        a list of volume_ids or an instance_name (with instance_name being the
        Name tag of the instance to which the desired volumes are mapped).
        Each mapping in the list is applied separately, so multiple sets of
        volumes can be all tagged differently with one call to this function.

    YAML example fragment:

    .. code-block:: yaml

        - filters:
            attachment.instance_id: i-abcdef12
          tags:
            Name: dev-int-abcdef12.aws-foo.com
        - filters:
            attachment.device: /dev/sdf
          tags:
            ManagedSnapshots: true
            BillingGroup: bubba.hotep@aws-foo.com
        - filters:
            instance_name: prd-foo-01.aws-foo.com
          tags:
            Name: prd-foo-01.aws-foo.com
            BillingGroup: infra-team@aws-foo.com
        - filters:
            volume_ids: [ vol-12345689, vol-abcdef12 ]
          tags:
            BillingGroup: infra-team@aws-foo.com

    authoritative
        Should un-declared tags currently set on matched volumes be deleted?
        Boolean.
    region
        Region to connect to.
    key
        Secret key to be used.
    keyid
        Access key to be used.
    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    testing = __opts__['test']
    args = {'tag_maps': tag_maps, 'authoritative': authoritative,
            'region': region, 'key': key, 'keyid': keyid, 'profile': profile}
    if testing:
        # Dry-run mode only reports what would change.
        args['dry_run'] = True

    r = __salt__['boto_ec2.set_volumes_tags'](**args)
    if r['success']:
        if r.get('changes'):
            ret['changes'] = r['changes']
            if testing:
                ret['comment'] = 'Tags would be updated.'
                ret['result'] = None
            else:
                ret['comment'] = 'Tags applied.'
    else:
        if testing:
            ret['comment'] = 'Error validating requested volume tags.'
        else:
            ret['comment'] = 'Error updating requested volume tags.'
        ret['result'] = False
    return ret
def volume_present(name, volume_name=None, volume_id=None, instance_name=None,
                   instance_id=None, device=None, size=None, snapshot_id=None,
                   volume_type=None, iops=None, encrypted=False,
                   kms_key_id=None, region=None, key=None, keyid=None,
                   profile=None):
    '''
    Ensure the EC2 volume is present and attached.

    name
        State definition name.
    volume_name
        The Name tag value for the volume.  If no volume with that matching
        name tag is found, a new volume will be created.  If multiple volumes
        are matched, the state will fail.  Exclusive with 'volume_id'.
    volume_id
        Resource ID of the volume.  Exclusive with 'volume_name'.
    instance_name
        Attach volume to instance with this Name tag.
        Exclusive with 'instance_id'.
    instance_id
        Attach volume to instance with this ID.
        Exclusive with 'instance_name'.
    device
        The device on the instance through which the volume is exposed
        (e.g. /dev/sdh).  Required.
    size
        The size of the new volume, in GiB.  If you're creating the volume
        from a snapshot and don't specify a volume size, the default is the
        snapshot size.  Optionally specified at volume creation time; will be
        ignored afterward.  Requires 'volume_name'.
    snapshot_id
        The snapshot ID from which the new Volume will be created.  Optionally
        specified at volume creation time; will be ignored afterward.
        Requires 'volume_name'.
    volume_type
        The type of the volume.  Optionally specified at volume creation time;
        will be ignored afterward.  Requires 'volume_name'.  Valid volume
        types for AWS can be found here:
        http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html
    iops
        The provisioned IOPS you want to associate with this volume.
        Optionally specified at volume creation time; will be ignored
        afterward.  Requires 'volume_name'.
    encrypted
        Specifies whether the volume should be encrypted.  Optionally
        specified at volume creation time; will be ignored afterward.
        Requires 'volume_name'.
    kms_key_id
        If encrypted is True, this KMS Key ID may be specified to encrypt
        volume with this key.  Optionally specified at volume creation time;
        will be ignored afterward.  Requires 'volume_name'.
        e.g.: arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef
    region
        Region to connect to.
    key
        Secret key to be used.
    keyid
        Access key to be used.
    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    old_dict = {}
    new_dict = {}
    running_states = ('running', 'stopped')

    if not salt.utils.data.exactly_one((volume_name, volume_id)):
        # FIX: the original message was built from two fragments that joined
        # into "...'volume_id',  must be provided." (stray comma/space).
        raise SaltInvocationError("Exactly one of 'volume_name' or "
                                  "'volume_id' must be provided.")
    if not salt.utils.data.exactly_one((instance_name, instance_id)):
        raise SaltInvocationError("Exactly one of 'instance_name' or "
                                  "'instance_id' must be provided.")
    if device is None:
        raise SaltInvocationError("Parameter 'device' is required.")
    args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile}
    if instance_name:
        instance_id = __salt__['boto_ec2.get_id'](
                name=instance_name, in_states=running_states, **args)
        if not instance_id:
            raise SaltInvocationError('Instance with Name {0} not found.'.format(instance_name))

    instances = __salt__['boto_ec2.find_instances'](instance_id=instance_id,
                                                    return_objs=True, **args)
    instance = instances[0]

    if volume_name:
        filters = {'tag:Name': volume_name}
        vols = __salt__['boto_ec2.get_all_volumes'](filters=filters, **args)
        if len(vols) > 1:
            msg = "More than one volume matched volume name {0}, can't continue in state {1}".format(
                    volume_name, name)
            raise SaltInvocationError(msg)
        if not vols:
            # No volume with that Name tag yet - create one in the instance's AZ.
            if __opts__['test']:
                ret['comment'] = ('The volume with name {0} is set to be created and attached'
                                  ' on {1}({2}).'.format(volume_name, instance_id, device))
                ret['result'] = None
                return ret
            _rt = __salt__['boto_ec2.create_volume'](zone_name=instance.placement,
                                                     size=size,
                                                     snapshot_id=snapshot_id,
                                                     volume_type=volume_type,
                                                     iops=iops,
                                                     encrypted=encrypted,
                                                     kms_key_id=kms_key_id,
                                                     wait_for_creation=True,
                                                     **args)
            if 'result' in _rt:
                volume_id = _rt['result']
            else:
                raise SaltInvocationError('Error creating volume with name {0}.'.format(volume_name))
            _rt = __salt__['boto_ec2.set_volumes_tags'](tag_maps=[{
                'filters': {'volume_ids': [volume_id]},
                'tags': {'Name': volume_name}
            }], **args)
            if _rt['success'] is False:
                raise SaltInvocationError('Error updating requested volume '
                                          '{0} with name {1}. {2}'.format(volume_id,
                                                                          volume_name,
                                                                          _rt['comment']))
            old_dict['volume_id'] = None
            new_dict['volume_id'] = volume_id
        else:
            volume_id = vols[0]

    vols = __salt__['boto_ec2.get_all_volumes'](volume_ids=[volume_id],
                                                return_objs=True, **args)
    if not vols:
        # FIX: grammar - was 'Volume {0} do not exist'.
        raise SaltInvocationError('Volume {0} does not exist'.format(volume_id))
    vol = vols[0]
    if vol.zone != instance.placement:
        # EBS volumes can only attach within their own availability zone.
        raise SaltInvocationError(('Volume {0} in {1} cannot attach to instance'
                                   ' {2} in {3}.').format(volume_id, vol.zone,
                                                          instance_id,
                                                          instance.placement))
    attach_data = vol.attach_data
    if attach_data is not None and attach_data.instance_id is not None:
        if instance_id == attach_data.instance_id and device == attach_data.device:
            # Already attached exactly where we want it - nothing to do.
            ret['comment'] = 'The volume {0} is attached on {1}({2}).'.format(
                    volume_id, instance_id, device)
            return ret
        if __opts__['test']:
            # FIX: the original formatted this message with the arguments in the
            # wrong order (and with the typo 'attach_data.devic' plus a missing
            # closing paren), producing nonsense like
            # "The volume i-old is set to be detached from /dev/sdf(vol-123 ...".
            ret['comment'] = ('The volume {0} is set to be detached'
                              ' from {1}({2}) and attached on {3}({4}).').format(
                                  volume_id, attach_data.instance_id,
                                  attach_data.device, instance_id, device)
            ret['result'] = None
            return ret
        # NOTE: 'wait_for_detachement' is the (misspelled) kwarg name the
        # boto_ec2 execution module actually accepts - do not "fix" it here.
        if __salt__['boto_ec2.detach_volume'](volume_id=volume_id,
                                              wait_for_detachement=True,
                                              **args):
            ret['comment'] = 'Volume {0} is detached from {1}({2}).'.format(
                    volume_id, attach_data.instance_id, attach_data.device)
            old_dict['instance_id'] = attach_data.instance_id
            old_dict['device'] = attach_data.device
        else:
            # FIX: the original error text claimed the volume was "already
            # attached ... Failed to detach", which misstated the failure.
            raise SaltInvocationError('Failed to detach volume {0} from instance'
                                      ' {1}({2}).'.format(volume_id,
                                                          attach_data.instance_id,
                                                          attach_data.device))
    else:
        old_dict['instance_id'] = instance_id
        old_dict['device'] = None

    if __opts__['test']:
        ret['comment'] = 'The volume {0} is set to be attached on {1}({2}).'.format(
                volume_id, instance_id, device)
        ret['result'] = None
        return ret
    if __salt__['boto_ec2.attach_volume'](volume_id=volume_id,
                                          instance_id=instance_id,
                                          device=device, **args):
        ret['comment'] = ' '.join([
            ret['comment'],
            'Volume {0} is attached on {1}({2}).'.format(volume_id, instance_id, device)])
        new_dict['instance_id'] = instance_id
        new_dict['device'] = device
        ret['changes'] = {'old': old_dict, 'new': new_dict}
    else:
        ret['comment'] = 'Error attaching volume {0} to instance {1}({2}).'.format(
                volume_id, instance_id, device)
        ret['result'] = False
    return ret


def private_ips_present(name, network_interface_name=None,
                        network_interface_id=None, private_ip_addresses=None,
                        allow_reassignment=False, region=None, key=None,
                        keyid=None, profile=None):
    '''
    Ensure an ENI has secondary private ip addresses associated with it

    name
        (String) - State definition name
    network_interface_id
        (String) - The EC2 network interface id, example eni-123456789
    private_ip_addresses
        (List or String) - The secondary private ip address(es) that should be
        present on the ENI.
    allow_reassignment
        (Boolean) - If true, will reassign a secondary private ip address
        associated with another ENI. If false, state will fail if the
        secondary private ip address is associated with another ENI.
    region
        (string) - Region to connect to.
    key
        (string) - Secret key to be used.
    keyid
        (string) - Access key to be used.
    profile
        (variable) - A dict with region, key and keyid, or a pillar key
        (string) that contains a dict with region, key and keyid.
    '''
    if not salt.utils.data.exactly_one((network_interface_name, network_interface_id)):
        raise SaltInvocationError("Exactly one of 'network_interface_name', "
                                  "'network_interface_id' must be provided")
    if not private_ip_addresses:
        raise SaltInvocationError("You must provide the private_ip_addresses to associate with the "
                                  "ENI")
    # FIX: accept a single address string, as the docstring promises and as
    # private_ips_absent already does.  Without this, a bare string was
    # iterated character-by-character below.
    if not isinstance(private_ip_addresses, list):
        private_ip_addresses = [private_ip_addresses]

    ret = {
        'name': name,
        'result': True,
        'comment': '',
        'changes': {'old': [], 'new': []}
    }

    get_eni_args = {
        'name': network_interface_name,
        'network_interface_id': network_interface_id,
        'region': region,
        'key': key,
        'keyid': keyid,
        'profile': profile
    }
    eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args)

    # Check if there are any new secondary private ips to add to the eni
    if eni and eni.get('result', {}).get('private_ip_addresses'):
        for eni_pip in eni['result']['private_ip_addresses']:
            ret['changes']['old'].append(eni_pip['private_ip_address'])

    ips_to_add = []
    for private_ip in private_ip_addresses:
        if private_ip not in ret['changes']['old']:
            ips_to_add.append(private_ip)

    if ips_to_add:
        if not __opts__['test']:
            # Assign secondary private ips to ENI.
            # NOTE(review): this passes network_interface_id straight through;
            # when only network_interface_name was supplied it is None -- TODO
            # confirm the execution module resolves the ENI by name in that case.
            assign_ips_args = {
                'network_interface_id': network_interface_id,
                'private_ip_addresses': ips_to_add,
                'allow_reassignment': allow_reassignment,
                'region': region,
                'key': key,
                'keyid': keyid,
                'profile': profile
            }
            __salt__['boto_ec2.assign_private_ip_addresses'](**assign_ips_args)

            # Verify secondary private ips were properly assigned to ENI
            eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args)
            if eni and eni.get('result', {}).get('private_ip_addresses', None):
                for eni_pip in eni['result']['private_ip_addresses']:
                    ret['changes']['new'].append(eni_pip['private_ip_address'])

            ips_not_added = []
            for private_ip in private_ip_addresses:
                if private_ip not in ret['changes']['new']:
                    ips_not_added.append(private_ip)

            # Display results
            if ips_not_added:
                ret['result'] = False
                ret['comment'] = ('ips on eni: {0}\n'
                                  'attempted to add: {1}\n'
                                  'could not add the following ips: {2}\n'.format(
                                      '\n\t- ' + '\n\t- '.join(ret['changes']['new']),
                                      '\n\t- ' + '\n\t- '.join(ips_to_add),
                                      '\n\t- ' + '\n\t- '.join(ips_not_added)))
            else:
                ret['comment'] = "added ips: {0}".format(
                    '\n\t- ' + '\n\t- '.join(ips_to_add))

            # Verify there were changes
            if ret['changes']['old'] == ret['changes']['new']:
                ret['changes'] = {}
        else:
            # Testing mode, show that there were ips to add
            ret['comment'] = ('ips on eni: {0}\n'
                              'ips that would be added: {1}\n'.format(
                                  '\n\t- ' + '\n\t- '.join(ret['changes']['old']),
                                  '\n\t- ' + '\n\t- '.join(ips_to_add)))
            ret['changes'] = {}
            ret['result'] = None
    else:
        ret['comment'] = 'ips on eni: {0}'.format(
            '\n\t- ' + '\n\t- '.join(ret['changes']['old']))
        # there were no changes since we did not attempt to add ips
        ret['changes'] = {}
    return ret


def private_ips_absent(name, network_interface_name=None,
                       network_interface_id=None, private_ip_addresses=None,
                       region=None, key=None, keyid=None, profile=None):
    '''
    Ensure an ENI does not have secondary private ip addresses associated
    with it

    name
        (String) - State definition name
    network_interface_id
        (String) - The EC2 network interface id, example eni-123456789
    private_ip_addresses
        (List or String) - The secondary private ip address(es) that should be
        absent on the ENI.
    region
        (string) - Region to connect to.
    key
        (string) - Secret key to be used.
    keyid
        (string) - Access key to be used.
    profile
        (variable) - A dict with region, key and keyid, or a pillar key
        (string) that contains a dict with region, key and keyid.
    '''
    if not salt.utils.data.exactly_one((network_interface_name, network_interface_id)):
        raise SaltInvocationError("Exactly one of 'network_interface_name', "
                                  "'network_interface_id' must be provided")
    if not private_ip_addresses:
        raise SaltInvocationError("You must provide the private_ip_addresses to unassociate with "
                                  "the ENI")
    if not isinstance(private_ip_addresses, list):
        private_ip_addresses = [private_ip_addresses]

    ret = {
        'name': name,
        'result': True,
        'comment': '',
        'changes': {'new': [], 'old': []}
    }

    get_eni_args = {
        'name': network_interface_name,
        'network_interface_id': network_interface_id,
        'region': region,
        'key': key,
        'keyid': keyid,
        'profile': profile
    }
    eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args)

    # Check if there are any old private ips to remove from the eni
    primary_private_ip = None
    if eni and eni.get('result', {}).get('private_ip_addresses'):
        for eni_pip in eni['result']['private_ip_addresses']:
            ret['changes']['old'].append(eni_pip['private_ip_address'])
            if eni_pip['primary']:
                primary_private_ip = eni_pip['private_ip_address']

    ips_to_remove = []
    for private_ip in private_ip_addresses:
        if private_ip in ret['changes']['old']:
            ips_to_remove.append(private_ip)
        if private_ip == primary_private_ip:
            # The primary address can never be unassigned - fail loudly.
            ret['result'] = False
            ret['comment'] = ('You cannot unassign the primary private ip address ({0}) on an '
                              'eni\n'
                              'ips on eni: {1}\n'
                              'attempted to remove: {2}\n'.format(
                                  primary_private_ip,
                                  '\n\t- ' + '\n\t- '.join(ret['changes']['old']),
                                  '\n\t- ' + '\n\t- '.join(private_ip_addresses)))
            ret['changes'] = {}
            return ret

    if ips_to_remove:
        if not __opts__['test']:
            # Unassign secondary private ips to ENI
            assign_ips_args = {
                'network_interface_id': network_interface_id,
                'private_ip_addresses': ips_to_remove,
                'region': region,
                'key': key,
                'keyid': keyid,
                'profile': profile
            }
            __salt__['boto_ec2.unassign_private_ip_addresses'](**assign_ips_args)

            # Verify secondary private ips were properly unassigned from ENI
            eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args)
            if eni and eni.get('result', {}).get('private_ip_addresses', None):
                for eni_pip in eni['result']['private_ip_addresses']:
                    ret['changes']['new'].append(eni_pip['private_ip_address'])

            ips_not_removed = []
            for private_ip in private_ip_addresses:
                if private_ip in ret['changes']['new']:
                    ips_not_removed.append(private_ip)

            if ips_not_removed:
                ret['result'] = False
                ret['comment'] = ('ips on eni: {0}\n'
                                  'attempted to remove: {1}\n'
                                  'could not remove the following ips: {2}\n'.format(
                                      '\n\t- ' + '\n\t- '.join(ret['changes']['new']),
                                      '\n\t- ' + '\n\t- '.join(ips_to_remove),
                                      '\n\t- ' + '\n\t- '.join(ips_not_removed)))
            else:
                ret['comment'] = "removed ips: {0}".format(
                    '\n\t- ' + '\n\t- '.join(ips_to_remove))

            # Verify there were changes
            if ret['changes']['old'] == ret['changes']['new']:
                ret['changes'] = {}
        else:
            # Testing mode, show that there were ips to remove
            ret['comment'] = ('ips on eni: {0}\n'
                              'ips that would be removed: {1}\n'.format(
                                  '\n\t- ' + '\n\t- '.join(ret['changes']['old']),
                                  '\n\t- ' + '\n\t- '.join(ips_to_remove)))
            ret['changes'] = {}
            ret['result'] = None
    else:
        ret['comment'] = 'ips on network interface: {0}'.format(
            '\n\t- ' + '\n\t- '.join(ret['changes']['old']))
        # there were no changes since we did not attempt to remove ips
        ret['changes'] = {}
    return ret
saltstack/salt
salt/states/boto_ec2.py
eni_absent
python
def eni_absent(
        name,
        release_eip=False,
        region=None,
        key=None,
        keyid=None,
        profile=None):
    '''
    Ensure the EC2 ENI is absent.

    .. versionadded:: 2016.3.0

    name
        Name tag associated with the ENI.
    release_eip
        True/False - release any EIP associated with the ENI
    region
        Region to connect to.
    key
        Secret key to be used.
    keyid
        Access key to be used.
    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    r = __salt__['boto_ec2.get_network_interface'](
        name=name, region=region, key=key, keyid=keyid, profile=profile
    )
    if 'error' in r:
        ret['result'] = False
        ret['comment'] = 'Error when attempting to find eni: {0}.'.format(
            r['error']['message']
        )
        return ret
    if not r['result']:
        # ENI already gone; in test mode still report the intended action.
        if __opts__['test']:
            ret['comment'] = 'ENI is set to be deleted.'
            ret['result'] = None
            return ret
    else:
        if __opts__['test']:
            ret['comment'] = 'ENI is set to be deleted.'
            if release_eip and 'allocationId' in r['result']:
                ret['comment'] = ' '.join([ret['comment'], 'Allocated/associated EIP is set to be released'])
            ret['result'] = None
            return ret
        # FIX: the original indexed r['result']['attachment'] directly, which
        # raises KeyError for an ENI that is not attached (presumably the
        # describe result omits 'attachment' then -- TODO confirm against
        # boto_ec2.get_network_interface).  Use .get() so an unattached ENI
        # skips straight to deletion.
        if 'id' in r['result'].get('attachment', {}):
            result_detach = __salt__['boto_ec2.detach_network_interface'](
                name=name, force=True, region=region, key=key,
                keyid=keyid, profile=profile
            )
            if 'error' in result_detach:
                ret['result'] = False
                ret['comment'] = 'Failed to detach ENI: {0}'.format(
                    result_detach['error']['message']
                )
                return ret
            # TODO: Ensure the detach occurs before continuing
        result_delete = __salt__['boto_ec2.delete_network_interface'](
            name=name, region=region, key=key, keyid=keyid, profile=profile
        )
        if 'error' in result_delete:
            ret['result'] = False
            ret['comment'] = 'Failed to delete ENI: {0}'.format(
                result_delete['error']['message']
            )
            return ret
        ret['comment'] = 'Deleted ENI {0}'.format(name)
        ret['changes']['id'] = None
        if release_eip and 'allocationId' in r['result']:
            _ret = __salt__['boto_ec2.release_eip_address'](
                public_ip=None,
                allocation_id=r['result']['allocationId'],
                region=region,
                key=key,
                keyid=keyid,
                profile=profile)
            if not _ret:
                ret['comment'] = ' '.join([ret['comment'], 'Failed to release EIP allocated to the ENI.'])
                ret['result'] = False
                return ret
            else:
                ret['comment'] = ' '.join([ret['comment'], 'EIP released.'])
                ret['changes']['eip released'] = True
    return ret
Ensure the EC2 ENI is absent. .. versionadded:: 2016.3.0 name Name tag associated with the ENI. release_eip True/False - release any EIP associated with the ENI region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_ec2.py#L456-L547
null
# -*- coding: utf-8 -*- ''' Manage EC2 .. versionadded:: 2015.8.0 This module provides an interface to the Elastic Compute Cloud (EC2) service from AWS. The below code creates a key pair: .. code-block:: yaml create-key-pair: boto_ec2.key_present: - name: mykeypair - save_private: /root/ - region: eu-west-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs .. code-block:: yaml import-key-pair: boto_ec2.key_present: - name: mykeypair - upload_public: 'ssh-rsa AAAA' - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs You can also use salt:// in order to define the public key. .. code-block:: yaml import-key-pair: boto_ec2.key_present: - name: mykeypair - upload_public: salt://mybase/public_key.pub - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs The below code deletes a key pair: .. code-block:: yaml delete-key-pair: boto_ec2.key_absent: - name: mykeypair - region: eu-west-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging from time import time, sleep # Import salt libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin import salt.utils.data import salt.utils.dictupdate as dictupdate from salt.exceptions import SaltInvocationError, CommandExecutionError log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. ''' if 'boto_ec2.get_key' in __salt__: return 'boto_ec2' else: return False def key_present(name, save_private=None, upload_public=None, region=None, key=None, keyid=None, profile=None): ''' Ensure key pair is present. 
''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } exists = __salt__['boto_ec2.get_key'](name, region, key, keyid, profile) log.debug('exists is %s', exists) if upload_public is not None and 'salt://' in upload_public: try: upload_public = __salt__['cp.get_file_str'](upload_public) except IOError as e: log.debug(e) ret['comment'] = 'File {0} not found.'.format(upload_public) ret['result'] = False return ret if not exists: if __opts__['test']: ret['comment'] = 'The key {0} is set to be created.'.format(name) ret['result'] = None return ret if save_private and not upload_public: created = __salt__['boto_ec2.create_key']( name, save_private, region, key, keyid, profile ) if created: ret['result'] = True ret['comment'] = 'The key {0} is created.'.format(name) ret['changes']['new'] = created else: ret['result'] = False ret['comment'] = 'Could not create key {0} '.format(name) elif not save_private and upload_public: imported = __salt__['boto_ec2.import_key'](name, upload_public, region, key, keyid, profile) if imported: ret['result'] = True ret['comment'] = 'The key {0} is created.'.format(name) ret['changes']['old'] = None ret['changes']['new'] = imported else: ret['result'] = False ret['comment'] = 'Could not create key {0} '.format(name) else: ret['result'] = False ret['comment'] = 'You can either upload or download a private key ' else: ret['result'] = True ret['comment'] = 'The key name {0} already exists'.format(name) return ret def key_absent(name, region=None, key=None, keyid=None, profile=None): ''' Deletes a key pair ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } exists = __salt__['boto_ec2.get_key'](name, region, key, keyid, profile) if exists: if __opts__['test']: ret['comment'] = 'The key {0} is set to be deleted.'.format(name) ret['result'] = None return ret deleted = __salt__['boto_ec2.delete_key'](name, region, key, keyid, profile) log.debug('exists is %s', deleted) if deleted: ret['result'] = True 
            ret['comment'] = 'The key {0} is deleted.'.format(name)
            ret['changes']['old'] = name
        else:
            ret['result'] = False
            ret['comment'] = 'Could not delete key {0} '.format(name)
    else:
        ret['result'] = True
        ret['comment'] = 'The key name {0} does not exist'.format(name)
    return ret


def eni_present(
        name,
        subnet_id=None,
        subnet_name=None,
        private_ip_address=None,
        description=None,
        groups=None,
        source_dest_check=True,
        allocate_eip=None,
        arecords=None,
        region=None,
        key=None,
        keyid=None,
        profile=None):
    '''
    Ensure the EC2 ENI exists.

    .. versionadded:: 2016.3.0

    name
        Name tag associated with the ENI.

    subnet_id
        The VPC subnet ID the ENI will exist within.

    subnet_name
        The VPC subnet name the ENI will exist within.

    private_ip_address
        The private ip address to use for this ENI. If this is not specified
        AWS will automatically assign a private IP address to the ENI. Must be
        specified at creation time; will be ignored afterward.

    description
        Description of the key.

    groups
        A list of security groups to apply to the ENI.

    source_dest_check
        Boolean specifying whether source/destination checking is enabled on
        the ENI.

    allocate_eip
        allocate and associate an EIP to the ENI. Could be 'standard' to
        allocate Elastic IP to EC2 region or 'vpc' to get it for a
        particular VPC

        .. versionchanged:: 2016.11.0

    arecords
        A list of arecord dicts with attributes needed for the DNS add_record
        state. By default the boto_route53.add_record state will be used, which
        requires: name, zone, ttl, and identifier. See the boto_route53 state
        for information about these attributes. Other DNS modules can be
        called by specifying the provider keyword. By default, the private ENI
        IP address will be used, set 'public: True' in the arecord dict to use
        the ENI's public IP address

        .. versionadded:: 2016.3.0

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    if not salt.utils.data.exactly_one((subnet_id, subnet_name)):
        raise SaltInvocationError('One (but not both) of subnet_id or '
                                  'subnet_name must be provided.')
    if not groups:
        raise SaltInvocationError('groups is a required argument.')
    if not isinstance(groups, list):
        raise SaltInvocationError('groups must be a list.')
    if not isinstance(source_dest_check, bool):
        raise SaltInvocationError('source_dest_check must be a bool.')
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    r = __salt__['boto_ec2.get_network_interface'](
        name=name, region=region, key=key, keyid=keyid, profile=profile
    )
    if 'error' in r:
        ret['result'] = False
        ret['comment'] = 'Error when attempting to find eni: {0}.'.format(
            r['error']['message']
        )
        return ret
    if not r['result']:
        # ENI does not exist yet - create it (or report what would happen).
        # NOTE(review): 'assocaited' typos below live in user-facing comment
        # strings; left untouched here as they are runtime output.
        if __opts__['test']:
            ret['comment'] = 'ENI is set to be created.'
            if allocate_eip:
                ret['comment'] = ' '.join([ret['comment'], 'An EIP is set to be allocated/assocaited to the ENI.'])
            if arecords:
                ret['comment'] = ' '.join([ret['comment'], 'A records are set to be created.'])
            ret['result'] = None
            return ret
        result_create = __salt__['boto_ec2.create_network_interface'](
            name, subnet_id=subnet_id, subnet_name=subnet_name,
            private_ip_address=private_ip_address, description=description,
            groups=groups, region=region, key=key, keyid=keyid,
            profile=profile
        )
        if 'error' in result_create:
            ret['result'] = False
            ret['comment'] = 'Failed to create ENI: {0}'.format(
                result_create['error']['message']
            )
            return ret
        r['result'] = result_create['result']
        ret['comment'] = 'Created ENI {0}'.format(name)
        ret['changes']['id'] = r['result']['id']
    else:
        # ENI exists - reconcile its description and security groups.
        _ret = _eni_attribute(
            r['result'], 'description', description, region, key, keyid,
            profile
        )
        ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
        ret['comment'] = _ret['comment']
        if not _ret['result']:
            ret['result'] = _ret['result']
            if ret['result'] is False:
                return ret
        _ret = _eni_groups(
            r['result'], groups, region, key, keyid, profile
        )
        ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
        ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
        if not _ret['result']:
            ret['result'] = _ret['result']
            if ret['result'] is False:
                return ret
    # Actions that need to occur whether creating or updating
    _ret = _eni_attribute(
        r['result'], 'source_dest_check', source_dest_check, region, key,
        keyid, profile
    )
    ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
    ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
    if not _ret['result']:
        ret['result'] = _ret['result']
        return ret
    if allocate_eip:
        # 'allocationId' in the lookup result indicates an EIP is already
        # bound to this ENI; only allocate/associate when it is absent.
        if 'allocationId' not in r['result']:
            if __opts__['test']:
                ret['comment'] = ' '.join([ret['comment'], 'An EIP is set to be allocated and assocaited to the ENI.'])
            else:
                domain = 'vpc' if allocate_eip == 'vpc' else None
                eip_alloc = __salt__['boto_ec2.allocate_eip_address'](domain=domain,
                                                                      region=region,
                                                                      key=key,
                                                                      keyid=keyid,
                                                                      profile=profile)
                if eip_alloc:
                    _ret = __salt__['boto_ec2.associate_eip_address'](instance_id=None,
                                                                      instance_name=None,
                                                                      public_ip=None,
                                                                      allocation_id=eip_alloc['allocation_id'],
                                                                      network_interface_id=r['result']['id'],
                                                                      private_ip_address=None,
                                                                      allow_reassociation=False,
                                                                      region=region,
                                                                      key=key,
                                                                      keyid=keyid,
                                                                      profile=profile)
                    if not _ret:
                        # Association failed: try to release the freshly
                        # allocated EIP so it does not leak (and bill).
                        _ret = __salt__['boto_ec2.release_eip_address'](public_ip=None,
                                                                        allocation_id=eip_alloc['allocation_id'],
                                                                        region=region,
                                                                        key=key,
                                                                        keyid=keyid,
                                                                        profile=profile)
                        ret['result'] = False
                        msg = 'Failed to assocaite the allocated EIP address with the ENI.  The EIP {0}'.format('was successfully released.' if _ret else 'was NOT RELEASED.')
                        ret['comment'] = ' '.join([ret['comment'], msg])
                        return ret
                else:
                    ret['result'] = False
                    ret['comment'] = ' '.join([ret['comment'], 'Failed to allocate an EIP address'])
                    return ret
        else:
            ret['comment'] = ' '.join([ret['comment'], 'An EIP is already allocated/assocaited to the ENI'])
    if arecords:
        for arecord in arecords:
            if 'name' not in arecord:
                msg = 'The arecord must contain a "name" property.'
                raise SaltInvocationError(msg)
            log.debug('processing arecord %s', arecord)
            _ret = None
            dns_provider = 'boto_route53'
            arecord['record_type'] = 'A'
            public_ip_arecord = False
            if 'public' in arecord:
                public_ip_arecord = arecord.pop('public')
            if public_ip_arecord:
                # Prefer the IP already reported on the ENI, then any EIP
                # allocated earlier in this run.
                # NOTE(review): if allocate_eip was false, eip_alloc is
                # unbound here and this branch would raise NameError.
                if 'publicIp' in r['result']:
                    arecord['value'] = r['result']['publicIp']
                elif 'public_ip' in eip_alloc:
                    arecord['value'] = eip_alloc['public_ip']
                else:
                    msg = 'Unable to add an A record for the public IP address, a public IP address does not seem to be allocated to this ENI.'
                    raise CommandExecutionError(msg)
            else:
                arecord['value'] = r['result']['private_ip_address']
            if 'provider' in arecord:
                dns_provider = arecord.pop('provider')
            if dns_provider == 'boto_route53':
                # Default the route53 state's auth args from our own.
                if 'profile' not in arecord:
                    arecord['profile'] = profile
                if 'key' not in arecord:
                    arecord['key'] = key
                if 'keyid' not in arecord:
                    arecord['keyid'] = keyid
                if 'region' not in arecord:
                    arecord['region'] = region
            _ret = __states__['.'.join([dns_provider, 'present'])](**arecord)
            log.debug('ret from dns_provider.present = %s', _ret)
            ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
            ret['comment'] = ' '.join([ret['comment'], _ret['comment']])
            if not _ret['result']:
                ret['result'] = _ret['result']
                if ret['result'] is False:
                    return ret
    return ret


def _eni_attribute(metadata, attr, value, region, key, keyid, profile):
    # Reconcile a single scalar ENI attribute; returns a partial state dict
    # that the caller merges into its own.
    ret = {'result': True, 'comment': '', 'changes': {}}
    if metadata[attr] == value:
        return ret
    if __opts__['test']:
        ret['comment'] = 'ENI set to have {0} updated.'.format(attr)
        ret['result'] = None
        return ret
    result_update = __salt__['boto_ec2.modify_network_interface_attribute'](
        network_interface_id=metadata['id'], attr=attr,
        value=value, region=region, key=key, keyid=keyid, profile=profile
    )
    if 'error' in result_update:
        msg = 'Failed to update ENI {0}: {1}.'
ret['result'] = False ret['comment'] = msg.format(attr, result_update['error']['message']) else: ret['comment'] = 'Updated ENI {0}.'.format(attr) ret['changes'][attr] = { 'old': metadata[attr], 'new': value } return ret def _eni_groups(metadata, groups, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} group_ids = [g['id'] for g in metadata['groups']] group_ids.sort() _groups = __salt__['boto_secgroup.convert_to_group_ids']( groups, vpc_id=metadata['vpc_id'], region=region, key=key, keyid=keyid, profile=profile ) if not _groups: ret['comment'] = 'Could not find secgroup ids for provided groups.' ret['result'] = False _groups.sort() if group_ids == _groups: return ret if __opts__['test']: ret['comment'] = 'ENI set to have groups updated.' ret['result'] = None return ret result_update = __salt__['boto_ec2.modify_network_interface_attribute']( network_interface_id=metadata['id'], attr='groups', value=_groups, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_update: msg = 'Failed to update ENI groups: {1}.' ret['result'] = False ret['comment'] = msg.format(result_update['error']['message']) else: ret['comment'] = 'Updated ENI groups.' ret['changes']['groups'] = { 'old': group_ids, 'new': _groups } return ret def snapshot_created(name, ami_name, instance_name, wait_until_available=True, wait_timeout_seconds=300, **kwargs): ''' Create a snapshot from the given instance .. 
versionadded:: 2016.3.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } if not __salt__['boto_ec2.create_image'](ami_name=ami_name, instance_name=instance_name, **kwargs): ret['comment'] = 'Failed to create new AMI {ami_name}'.format(ami_name=ami_name) ret['result'] = False return ret ret['comment'] = 'Created new AMI {ami_name}'.format(ami_name=ami_name) ret['changes']['new'] = {ami_name: ami_name} if not wait_until_available: return ret starttime = time() while True: images = __salt__['boto_ec2.find_images'](ami_name=ami_name, return_objs=True, **kwargs) if images and images[0].state == 'available': break if time() - starttime > wait_timeout_seconds: if images: ret['comment'] = 'AMI still in state {state} after timeout'.format(state=images[0].state) else: ret['comment'] = 'AMI with name {ami_name} not found after timeout.'.format(ami_name=ami_name) ret['result'] = False return ret sleep(5) return ret def instance_present(name, instance_name=None, instance_id=None, image_id=None, image_name=None, tags=None, key_name=None, security_groups=None, user_data=None, instance_type=None, placement=None, kernel_id=None, ramdisk_id=None, vpc_id=None, vpc_name=None, monitoring_enabled=None, subnet_id=None, subnet_name=None, private_ip_address=None, block_device_map=None, disable_api_termination=None, instance_initiated_shutdown_behavior=None, placement_group=None, client_token=None, security_group_ids=None, security_group_names=None, additional_info=None, tenancy=None, instance_profile_arn=None, instance_profile_name=None, ebs_optimized=None, network_interfaces=None, network_interface_name=None, network_interface_id=None, attributes=None, target_state=None, public_ip=None, allocation_id=None, allocate_eip=False, region=None, key=None, keyid=None, profile=None): ### TODO - implement 'target_state={running, stopped}' ''' Ensure an EC2 instance is running with the given attributes and state. name (string) - The name of the state definition. 
Recommended that this match the instance_name attribute (generally the FQDN of the instance). instance_name (string) - The name of the instance, generally its FQDN. Exclusive with 'instance_id'. instance_id (string) - The ID of the instance (if known). Exclusive with 'instance_name'. image_id (string) – The ID of the AMI image to run. image_name (string) – The name of the AMI image to run. tags (dict) - Tags to apply to the instance. key_name (string) – The name of the key pair with which to launch instances. security_groups (list of strings) – The names of the EC2 classic security groups with which to associate instances user_data (string) – The Base64-encoded MIME user data to be made available to the instance(s) in this reservation. instance_type (string) – The EC2 instance size/type. Note that only certain types are compatible with HVM based AMIs. placement (string) – The Availability Zone to launch the instance into. kernel_id (string) – The ID of the kernel with which to launch the instances. ramdisk_id (string) – The ID of the RAM disk with which to launch the instances. vpc_id (string) - The ID of a VPC to attach the instance to. vpc_name (string) - The name of a VPC to attach the instance to. monitoring_enabled (bool) – Enable detailed CloudWatch monitoring on the instance. subnet_id (string) – The ID of the subnet within which to launch the instances for VPC. subnet_name (string) – The name of the subnet within which to launch the instances for VPC. private_ip_address (string) – If you’re using VPC, you can optionally use this parameter to assign the instance a specific available IP address from the subnet (e.g., 10.0.0.25). block_device_map (boto.ec2.blockdevicemapping.BlockDeviceMapping) – A BlockDeviceMapping data structure describing the EBS volumes associated with the Image. disable_api_termination (bool) – If True, the instances will be locked and will not be able to be terminated via the API. 
instance_initiated_shutdown_behavior (string) – Specifies whether the instance stops or terminates on instance-initiated shutdown. Valid values are: - 'stop' - 'terminate' placement_group (string) – If specified, this is the name of the placement group in which the instance(s) will be launched. client_token (string) – Unique, case-sensitive identifier you provide to ensure idempotency of the request. Maximum 64 ASCII characters. security_group_ids (list of strings) – The IDs of the VPC security groups with which to associate instances. security_group_names (list of strings) – The names of the VPC security groups with which to associate instances. additional_info (string) – Specifies additional information to make available to the instance(s). tenancy (string) – The tenancy of the instance you want to launch. An instance with a tenancy of ‘dedicated’ runs on single-tenant hardware and can only be launched into a VPC. Valid values are:”default” or “dedicated”. NOTE: To use dedicated tenancy you MUST specify a VPC subnet-ID as well. instance_profile_arn (string) – The Amazon resource name (ARN) of the IAM Instance Profile (IIP) to associate with the instances. instance_profile_name (string) – The name of the IAM Instance Profile (IIP) to associate with the instances. ebs_optimized (bool) – Whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and a tuned configuration stack to provide optimal EBS I/O performance. This optimization isn’t available with all instance types. network_interfaces (boto.ec2.networkinterface.NetworkInterfaceCollection) – A NetworkInterfaceCollection data structure containing the ENI specifications for the instance. network_interface_name (string) - The name of Elastic Network Interface to attach .. versionadded:: 2016.11.0 network_interface_id (string) - The id of Elastic Network Interface to attach .. 
versionadded:: 2016.11.0 attributes (dict) - Instance attributes and value to be applied to the instance. Available options are: - instanceType - A valid instance type (m1.small) - kernel - Kernel ID (None) - ramdisk - Ramdisk ID (None) - userData - Base64 encoded String (None) - disableApiTermination - Boolean (true) - instanceInitiatedShutdownBehavior - stop|terminate - blockDeviceMapping - List of strings - ie: [‘/dev/sda=false’] - sourceDestCheck - Boolean (true) - groupSet - Set of Security Groups or IDs - ebsOptimized - Boolean (false) - sriovNetSupport - String - ie: ‘simple’ target_state (string) - The desired target state of the instance. Available options are: - running - stopped Note that this option is currently UNIMPLEMENTED. public_ip: (string) - The IP of a previously allocated EIP address, which will be attached to the instance. EC2 Classic instances ONLY - for VCP pass in an allocation_id instead. allocation_id: (string) - The ID of a previously allocated EIP address, which will be attached to the instance. VPC instances ONLY - for Classic pass in a public_ip instead. allocate_eip: (bool) - Allocate and attach an EIP on-the-fly for this instance. Note you'll want to releaase this address when terminating the instance, either manually or via the 'release_eip' flag to 'instance_absent'. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. .. 
versionadded:: 2016.3.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } _create = False running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') changed_attrs = {} if not salt.utils.data.exactly_one((image_id, image_name)): raise SaltInvocationError('Exactly one of image_id OR ' 'image_name must be provided.') if (public_ip or allocation_id or allocate_eip) and not salt.utils.data.exactly_one((public_ip, allocation_id, allocate_eip)): raise SaltInvocationError('At most one of public_ip, allocation_id OR ' 'allocate_eip may be provided.') if instance_id: exists = __salt__['boto_ec2.exists'](instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) if not exists: _create = True else: instances = __salt__['boto_ec2.find_instances'](name=instance_name if instance_name else name, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) if not instances: _create = True elif len(instances) > 1: log.debug('Multiple instances matching criteria found - cannot determine a singular instance-id') instance_id = None # No way to know, we'll just have to bail later.... 
else: instance_id = instances[0] if _create: if __opts__['test']: ret['comment'] = 'The instance {0} is set to be created.'.format(name) ret['result'] = None return ret if image_name: args = {'ami_name': image_name, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile} image_ids = __salt__['boto_ec2.find_images'](**args) if image_ids: image_id = image_ids[0] else: image_id = image_name r = __salt__['boto_ec2.run'](image_id, instance_name if instance_name else name, tags=tags, key_name=key_name, security_groups=security_groups, user_data=user_data, instance_type=instance_type, placement=placement, kernel_id=kernel_id, ramdisk_id=ramdisk_id, vpc_id=vpc_id, vpc_name=vpc_name, monitoring_enabled=monitoring_enabled, subnet_id=subnet_id, subnet_name=subnet_name, private_ip_address=private_ip_address, block_device_map=block_device_map, disable_api_termination=disable_api_termination, instance_initiated_shutdown_behavior=instance_initiated_shutdown_behavior, placement_group=placement_group, client_token=client_token, security_group_ids=security_group_ids, security_group_names=security_group_names, additional_info=additional_info, tenancy=tenancy, instance_profile_arn=instance_profile_arn, instance_profile_name=instance_profile_name, ebs_optimized=ebs_optimized, network_interfaces=network_interfaces, network_interface_name=network_interface_name, network_interface_id=network_interface_id, region=region, key=key, keyid=keyid, profile=profile) if not r or 'instance_id' not in r: ret['result'] = False ret['comment'] = 'Failed to create instance {0}.'.format(instance_name if instance_name else name) return ret instance_id = r['instance_id'] ret['changes'] = {'old': {}, 'new': {}} ret['changes']['old']['instance_id'] = None ret['changes']['new']['instance_id'] = instance_id # To avoid issues we only allocate new EIPs at instance creation. 
# This might miss situations where an instance is initially created # created without and one is added later, but the alternative is the # risk of EIPs allocated at every state run. if allocate_eip: if __opts__['test']: ret['comment'] = 'New EIP would be allocated.' ret['result'] = None return ret domain = 'vpc' if vpc_id or vpc_name else None r = __salt__['boto_ec2.allocate_eip_address']( domain=domain, region=region, key=key, keyid=keyid, profile=profile) if not r: ret['result'] = False ret['comment'] = 'Failed to allocate new EIP.' return ret allocation_id = r['allocation_id'] log.info("New EIP with address %s allocated.", r['public_ip']) else: log.info("EIP not requested.") if public_ip or allocation_id: # This can take a bit to show up, give it a chance to... tries = 10 secs = 3 for t in range(tries): r = __salt__['boto_ec2.get_eip_address_info']( addresses=public_ip, allocation_ids=allocation_id, region=region, key=key, keyid=keyid, profile=profile) if r: break else: log.info( 'Waiting up to %s secs for new EIP %s to become available', tries * secs, public_ip or allocation_id ) time.sleep(secs) if not r: ret['result'] = False ret['comment'] = 'Failed to lookup EIP {0}.'.format(public_ip or allocation_id) return ret ip = r[0]['public_ip'] if r[0].get('instance_id'): if r[0]['instance_id'] != instance_id: ret['result'] = False ret['comment'] = ('EIP {0} is already associated with instance ' '{1}.'.format(public_ip if public_ip else allocation_id, r[0]['instance_id'])) return ret else: if __opts__['test']: ret['comment'] = 'Instance {0} to be updated.'.format(name) ret['result'] = None return ret r = __salt__['boto_ec2.associate_eip_address']( instance_id=instance_id, public_ip=public_ip, allocation_id=allocation_id, region=region, key=key, keyid=keyid, profile=profile) if r: if 'new' not in ret['changes']: ret['changes']['new'] = {} ret['changes']['new']['public_ip'] = ip else: ret['result'] = False ret['comment'] = 'Failed to attach EIP to instance 
{0}.'.format( instance_name if instance_name else name) return ret if attributes: for k, v in six.iteritems(attributes): curr = __salt__['boto_ec2.get_attribute'](k, instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile) curr = {} if not isinstance(curr, dict) else curr if curr.get(k) == v: continue else: if __opts__['test']: changed_attrs[k] = 'The instance attribute {0} is set to be changed from \'{1}\' to \'{2}\'.'.format( k, curr.get(k), v) continue try: r = __salt__['boto_ec2.set_attribute'](attribute=k, attribute_value=v, instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile) except SaltInvocationError as e: ret['result'] = False ret['comment'] = 'Failed to set attribute {0} to {1} on instance {2}.'.format(k, v, instance_name) return ret ret['changes'] = ret['changes'] if ret['changes'] else {'old': {}, 'new': {}} ret['changes']['old'][k] = curr.get(k) ret['changes']['new'][k] = v if __opts__['test']: if changed_attrs: ret['changes']['new'] = changed_attrs ret['result'] = None else: ret['comment'] = 'Instance {0} is in the correct state'.format(instance_name if instance_name else name) ret['result'] = True if tags and instance_id is not None: tags = dict(tags) curr_tags = dict(__salt__['boto_ec2.get_all_tags'](filters={'resource-id': instance_id}, region=region, key=key, keyid=keyid, profile=profile).get(instance_id, {})) current = set(curr_tags.keys()) desired = set(tags.keys()) remove = list(current - desired) # Boto explicitly requires a list here and can't cope with a set... add = dict([(t, tags[t]) for t in desired - current]) replace = dict([(t, tags[t]) for t in tags if tags.get(t) != curr_tags.get(t)]) # Tag keys are unique despite the bizarre semantics uses which make it LOOK like they could be duplicative. 
        add.update(replace)
        if add or remove:
            if __opts__['test']:
                ret['changes']['old'] = ret['changes']['old'] if 'old' in ret['changes'] else {}
                ret['changes']['new'] = ret['changes']['new'] if 'new' in ret['changes'] else {}
                ret['changes']['old']['tags'] = curr_tags
                ret['changes']['new']['tags'] = tags
                ret['comment'] += ' Tags would be updated on instance {0}.'.format(instance_name if
                                                                                   instance_name else name)
            else:
                if remove:
                    if not __salt__['boto_ec2.delete_tags'](resource_ids=instance_id, tags=remove,
                                                            region=region, key=key, keyid=keyid,
                                                            profile=profile):
                        msg = "Error while deleting tags on instance {0}".format(instance_name if
                                                                                 instance_name else name)
                        log.error(msg)
                        ret['comment'] += ' ' + msg
                        ret['result'] = False
                        return ret
                if add:
                    if not __salt__['boto_ec2.create_tags'](resource_ids=instance_id, tags=add,
                                                            region=region, key=key, keyid=keyid,
                                                            profile=profile):
                        msg = "Error while creating tags on instance {0}".format(instance_name if
                                                                                 instance_name else name)
                        log.error(msg)
                        ret['comment'] += ' ' + msg
                        ret['result'] = False
                        return ret
                ret['changes']['old'] = ret['changes']['old'] if 'old' in ret['changes'] else {}
                ret['changes']['new'] = ret['changes']['new'] if 'new' in ret['changes'] else {}
                ret['changes']['old']['tags'] = curr_tags
                ret['changes']['new']['tags'] = tags
    return ret


def instance_absent(name, instance_name=None, instance_id=None,
                    release_eip=False, region=None, key=None, keyid=None,
                    profile=None, filters=None):
    '''
    Ensure an EC2 instance does not exist (is stopped and removed).

    .. versionchanged:: 2016.11.0

    name
        (string) - The name of the state definition.

    instance_name
        (string) - The name of the instance.

    instance_id
        (string) - The ID of the instance.

    release_eip
        (bool) - Release any associated EIPs during termination.

    region
        (string) - Region to connect to.

    key
        (string) - Secret key to be used.

    keyid
        (string) - Access key to be used.

    profile
        (variable) - A dict with region, key and keyid, or a pillar key
        (string) that contains a dict with region, key and keyid.

    filters
        (dict) - A dict of additional filters to use in matching the instance
        to delete.

    YAML example fragment:

    .. code-block:: yaml

        - filters:
            vpc-id: vpc-abcdef12
    '''
    ### TODO - Implement 'force' option??  Would automagically turn off
    ### 'disableApiTermination', as needed, before trying to delete.
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}
           }
    running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped')

    if not instance_id:
        try:
            instance_id = __salt__['boto_ec2.get_id'](name=instance_name if instance_name else name,
                                                      region=region, key=key, keyid=keyid,
                                                      profile=profile, in_states=running_states,
                                                      filters=filters)
        except CommandExecutionError as e:
            # Lookup failed outright - report 'unknown' rather than failure.
            ret['result'] = None
            ret['comment'] = ("Couldn't determine current status of instance "
                              "{0}.".format(instance_name or name))
            return ret

    instances = __salt__['boto_ec2.find_instances'](instance_id=instance_id, region=region,
                                                    key=key, keyid=keyid, profile=profile,
                                                    return_objs=True, filters=filters)
    if not instances:
        ret['result'] = True
        ret['comment'] = 'Instance {0} is already gone.'.format(instance_id)
        return ret
    instance = instances[0]

    ### Honor 'disableApiTermination' - if you want to override it, first use set_attribute() to turn it off
    no_can_do = __salt__['boto_ec2.get_attribute']('disableApiTermination',
                                                   instance_id=instance_id,
                                                   region=region, key=key,
                                                   keyid=keyid, profile=profile)
    if no_can_do.get('disableApiTermination') is True:
        ret['result'] = False
        ret['comment'] = 'Termination of instance {0} via the API is disabled.'.format(instance_id)
        return ret

    if __opts__['test']:
        ret['comment'] = 'The instance {0} is set to be deleted.'.format(name)
        ret['result'] = None
        return ret

    r = __salt__['boto_ec2.terminate'](instance_id=instance_id, name=instance_name,
                                       region=region, key=key, keyid=keyid,
                                       profile=profile)
    if not r:
        ret['result'] = False
        ret['comment'] = 'Failed to terminate instance {0}.'.format(instance_id)
        return ret

    ret['changes']['old'] = {'instance_id': instance_id}
    ret['changes']['new'] = None

    if release_eip:
        ip = getattr(instance, 'ip_address', None)
        if ip:
            base_args = {'region': region, 'key': key, 'keyid': keyid,
                         'profile': profile}
            public_ip = None
            alloc_id = None
            assoc_id = None
            # VPC instances are released by allocation_id; Classic by IP.
            if getattr(instance, 'vpc_id', None):
                r = __salt__['boto_ec2.get_eip_address_info'](addresses=ip, **base_args)
                if r and 'allocation_id' in r[0]:
                    alloc_id = r[0]['allocation_id']
                    assoc_id = r[0].get('association_id')
                else:
                    # I /believe/ this situation is impossible but let's hedge our bets...
                    ret['result'] = False
                    ret['comment'] = "Can't determine AllocationId for address {0}.".format(ip)
                    return ret
            else:
                public_ip = instance.ip_address

            if assoc_id:
                # Race here - sometimes the terminate above will already have dropped this
                if not __salt__['boto_ec2.disassociate_eip_address'](association_id=assoc_id,
                                                                     **base_args):
                    log.warning("Failed to disassociate EIP %s.", ip)

            if __salt__['boto_ec2.release_eip_address'](allocation_id=alloc_id,
                                                        public_ip=public_ip, **base_args):
                log.info("Released EIP address %s", public_ip or r[0]['public_ip'])
                ret['changes']['old']['public_ip'] = public_ip or r[0]['public_ip']
            else:
                ret['result'] = False
                ret['comment'] = "Failed to release EIP {0}.".format(ip)
                return ret

    return ret


def volume_absent(name, volume_name=None, volume_id=None, instance_name=None,
                  instance_id=None, device=None, region=None, key=None,
                  keyid=None, profile=None):
    '''
    Ensure the EC2 volume is detached and absent.

    .. versionadded:: 2016.11.0

    name
        State definition name.

    volume_name
        Name tag associated with the volume. For safety, if this matches more
        than one volume, the state will refuse to apply.

    volume_id
        Resource ID of the volume.

    instance_name
        Only remove volume if it is attached to instance with this Name tag.
        Exclusive with 'instance_id'. Requires 'device'.

    instance_id
        Only remove volume if it is attached to this instance.
        Exclusive with 'instance_name'. Requires 'device'.

    device
        Match by device rather than ID. Requires one of 'instance_name' or
        'instance_id'.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}
           }
    filters = {}
    running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped')

    if not salt.utils.data.exactly_one((volume_name, volume_id, instance_name, instance_id)):
        raise SaltInvocationError("Exactly one of 'volume_name', 'volume_id', "
                                  "'instance_name', or 'instance_id' must be provided.")
    if (instance_name or instance_id) and not device:
        raise SaltInvocationError("Parameter 'device' is required when either "
                                  "'instance_name' or 'instance_id' is specified.")
    # Build up an EC2 volume filter from whichever selector was provided.
    if volume_id:
        filters.update({'volume-id': volume_id})
    if volume_name:
        filters.update({'tag:Name': volume_name})
    if instance_name:
        instance_id = __salt__['boto_ec2.get_id'](
                name=instance_name, region=region, key=key, keyid=keyid,
                profile=profile, in_states=running_states)
        if not instance_id:
            # No such instance: treat its volumes as already absent.
            ret['comment'] = ('Instance with Name {0} not found. Assuming '
                              'associated volumes gone.'.format(instance_name))
            return ret
    if instance_id:
        filters.update({'attachment.instance-id': instance_id})
    if device:
        filters.update({'attachment.device': device})

    args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile}
    vols = __salt__['boto_ec2.get_all_volumes'](filters=filters, **args)
    if not vols:
        ret['comment'] = 'Volume matching criteria not found, assuming already absent'
        return ret
    if len(vols) > 1:
        # Refuse to guess when the filter is ambiguous.
        msg = "More than one volume matched criteria, can't continue in state {0}".format(name)
        log.error(msg)
        ret['comment'] = msg
        ret['result'] = False
        return ret
    vol = vols[0]
    log.info('Matched Volume ID %s', vol)

    if __opts__['test']:
        ret['comment'] = 'The volume {0} is set to be deleted.'.format(vol)
        ret['result'] = None
        return ret
    if __salt__['boto_ec2.delete_volume'](volume_id=vol, force=True, **args):
        ret['comment'] = 'Volume {0} deleted.'.format(vol)
        ret['changes'] = {'old': {'volume_id': vol}, 'new': {'volume_id': None}}
    else:
        ret['comment'] = 'Error deleting volume {0}.'.format(vol)
        ret['result'] = False
    return ret


def volumes_tagged(name, tag_maps, authoritative=False, region=None, key=None,
                   keyid=None, profile=None):
    '''
    Ensure EC2 volume(s) matching the given filters have the defined tags.

    .. versionadded:: 2016.11.0

    name
        State definition name.

    tag_maps
        List of dicts of filters and tags, where 'filters' is a dict suitable
        for passing to the 'filters' argument of boto_ec2.get_all_volumes(),
        and 'tags' is a dict of tags to be set on volumes as matched by the
        given filters. The filter syntax is extended to permit passing either
        a list of volume_ids or an instance_name (with instance_name being the
        Name tag of the instance to which the desired volumes are mapped).
        Each mapping in the list is applied separately, so multiple sets of
        volumes can be all tagged differently with one call to this function.

    YAML example fragment:

    .. code-block:: yaml

        - filters:
            attachment.instance_id: i-abcdef12
          tags:
            Name: dev-int-abcdef12.aws-foo.com
        - filters:
            attachment.device: /dev/sdf
          tags:
            ManagedSnapshots: true
            BillingGroup: bubba.hotep@aws-foo.com
        - filters:
            instance_name: prd-foo-01.aws-foo.com
          tags:
            Name: prd-foo-01.aws-foo.com
            BillingGroup: infra-team@aws-foo.com
        - filters:
            volume_ids: [ vol-12345689, vol-abcdef12 ]
          tags:
            BillingGroup: infra-team@aws-foo.com

    authoritative
        Should un-declared tags currently set on matched volumes be deleted?
        Boolean.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}
           }
    args = {'tag_maps': tag_maps, 'authoritative': authoritative,
            'region': region, 'key': key, 'keyid': keyid, 'profile': profile}

    if __opts__['test']:
        # The execution module supports a dry run, so delegate test mode
        # to it and report what would change.
        args['dry_run'] = True
        r = __salt__['boto_ec2.set_volumes_tags'](**args)
        if r['success']:
            if r.get('changes'):
                ret['comment'] = 'Tags would be updated.'
                ret['changes'] = r['changes']
                ret['result'] = None
        else:
            ret['comment'] = 'Error validating requested volume tags.'
            ret['result'] = False
        return ret
    r = __salt__['boto_ec2.set_volumes_tags'](**args)
    if r['success']:
        if r.get('changes'):
            ret['comment'] = 'Tags applied.'
            ret['changes'] = r['changes']
    else:
        ret['comment'] = 'Error updating requested volume tags.'
        ret['result'] = False
    return ret


def volume_present(name, volume_name=None, volume_id=None, instance_name=None,
                   instance_id=None, device=None, size=None, snapshot_id=None,
                   volume_type=None, iops=None, encrypted=False,
                   kms_key_id=None, region=None, key=None, keyid=None,
                   profile=None):
    '''
    Ensure the EC2 volume is present and attached.

    ..

    name
        State definition name.

    volume_name
        The Name tag value for the volume. If no volume with that matching
        name tag is found, a new volume will be created.
If multiple volumes are matched, the state will fail. volume_id Resource ID of the volume. Exclusive with 'volume_name'. instance_name Attach volume to instance with this Name tag. Exclusive with 'instance_id'. instance_id Attach volume to instance with this ID. Exclusive with 'instance_name'. device The device on the instance through which the volume is exposed (e.g. /dev/sdh) size The size of the new volume, in GiB. If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. snapshot_id The snapshot ID from which the new Volume will be created. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. volume_type The type of the volume. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. Valid volume types for AWS can be found here: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html iops The provisioned IOPS you want to associate with this volume. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. encrypted Specifies whether the volume should be encrypted. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. kms_key_id If encrypted is True, this KMS Key ID may be specified to encrypt volume with this key. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. e.g.: arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} old_dict = {} new_dict = {} running_states = ('running', 'stopped') if not salt.utils.data.exactly_one((volume_name, volume_id)): raise SaltInvocationError("Exactly one of 'volume_name', 'volume_id', " " must be provided.") if not salt.utils.data.exactly_one((instance_name, instance_id)): raise SaltInvocationError("Exactly one of 'instance_name', or 'instance_id'" " must be provided.") if device is None: raise SaltInvocationError("Parameter 'device' is required.") args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile} if instance_name: instance_id = __salt__['boto_ec2.get_id']( name=instance_name, in_states=running_states, **args) if not instance_id: raise SaltInvocationError('Instance with Name {0} not found.'.format(instance_name)) instances = __salt__['boto_ec2.find_instances'](instance_id=instance_id, return_objs=True, **args) instance = instances[0] if volume_name: filters = {} filters.update({'tag:Name': volume_name}) vols = __salt__['boto_ec2.get_all_volumes'](filters=filters, **args) if len(vols) > 1: msg = "More than one volume matched volume name {0}, can't continue in state {1}".format(volume_name, name) raise SaltInvocationError(msg) if not vols: if __opts__['test']: ret['comment'] = ('The volume with name {0} is set to be created and attached' ' on {1}({2}).'.format(volume_name, instance_id, device)) ret['result'] = None return ret _rt = __salt__['boto_ec2.create_volume'](zone_name=instance.placement, size=size, snapshot_id=snapshot_id, volume_type=volume_type, iops=iops, encrypted=encrypted, kms_key_id=kms_key_id, wait_for_creation=True, **args) if 'result' in _rt: volume_id = _rt['result'] else: raise SaltInvocationError('Error creating volume with name {0}.'.format(volume_name)) _rt = __salt__['boto_ec2.set_volumes_tags'](tag_maps=[{ 'filters': {'volume_ids': [volume_id]}, 'tags': {'Name': volume_name} }], **args) if _rt['success'] is False: raise 
SaltInvocationError('Error updating requested volume ' '{0} with name {1}. {2}'.format(volume_id, volume_name, _rt['comment'])) old_dict['volume_id'] = None new_dict['volume_id'] = volume_id else: volume_id = vols[0] vols = __salt__['boto_ec2.get_all_volumes'](volume_ids=[volume_id], return_objs=True, **args) if not vols: raise SaltInvocationError('Volume {0} do not exist'.format(volume_id)) vol = vols[0] if vol.zone != instance.placement: raise SaltInvocationError(('Volume {0} in {1} cannot attach to instance' ' {2} in {3}.').format(volume_id, vol.zone, instance_id, instance.placement)) attach_data = vol.attach_data if attach_data is not None and attach_data.instance_id is not None: if instance_id == attach_data.instance_id and device == attach_data.device: ret['comment'] = 'The volume {0} is attached on {1}({2}).'.format(volume_id, instance_id, device) return ret else: if __opts__['test']: ret['comment'] = ('The volume {0} is set to be detached' ' from {1}({2} and attached on {3}({4}).').format(attach_data.instance_id, attach_data.devic, volume_id, instance_id, device) ret['result'] = None return ret if __salt__['boto_ec2.detach_volume'](volume_id=volume_id, wait_for_detachement=True, **args): ret['comment'] = 'Volume {0} is detached from {1}({2}).'.format(volume_id, attach_data.instance_id, attach_data.device) old_dict['instance_id'] = attach_data.instance_id old_dict['device'] = attach_data.device else: raise SaltInvocationError(('The volume {0} is already attached on instance {1}({2}).' 
' Failed to detach').format(volume_id, attach_data.instance_id, attach_data.device)) else: old_dict['instance_id'] = instance_id old_dict['device'] = None if __opts__['test']: ret['comment'] = 'The volume {0} is set to be attached on {1}({2}).'.format(volume_id, instance_id, device) ret['result'] = None return ret if __salt__['boto_ec2.attach_volume'](volume_id=volume_id, instance_id=instance_id, device=device, **args): ret['comment'] = ' '.join([ ret['comment'], 'Volume {0} is attached on {1}({2}).'.format(volume_id, instance_id, device)]) new_dict['instance_id'] = instance_id new_dict['device'] = device ret['changes'] = {'old': old_dict, 'new': new_dict} else: ret['comment'] = 'Error attaching volume {0} to instance {1}({2}).'.format(volume_id, instance_id, device) ret['result'] = False return ret def private_ips_present(name, network_interface_name=None, network_interface_id=None, private_ip_addresses=None, allow_reassignment=False, region=None, key=None, keyid=None, profile=None): ''' Ensure an ENI has secondary private ip addresses associated with it name (String) - State definition name network_interface_id (String) - The EC2 network interface id, example eni-123456789 private_ip_addresses (List or String) - The secondary private ip address(es) that should be present on the ENI. allow_reassignment (Boolean) - If true, will reassign a secondary private ip address associated with another ENI. If false, state will fail if the secondary private ip address is associated with another ENI. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' if not salt.utils.data.exactly_one((network_interface_name, network_interface_id)): raise SaltInvocationError("Exactly one of 'network_interface_name', " "'network_interface_id' must be provided") if not private_ip_addresses: raise SaltInvocationError("You must provide the private_ip_addresses to associate with the " "ENI") ret = { 'name': name, 'result': True, 'comment': '', 'changes': {'old': [], 'new': []} } get_eni_args = { 'name': network_interface_name, 'network_interface_id': network_interface_id, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args) # Check if there are any new secondary private ips to add to the eni if eni and eni.get('result', {}).get('private_ip_addresses'): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['old'].append(eni_pip['private_ip_address']) ips_to_add = [] for private_ip in private_ip_addresses: if private_ip not in ret['changes']['old']: ips_to_add.append(private_ip) if ips_to_add: if not __opts__['test']: # Assign secondary private ips to ENI assign_ips_args = { 'network_interface_id': network_interface_id, 'private_ip_addresses': ips_to_add, 'allow_reassignment': allow_reassignment, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } __salt__['boto_ec2.assign_private_ip_addresses'](**assign_ips_args) # Verify secondary private ips were properly assigned to ENI eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args) if eni and eni.get('result', {}).get('private_ip_addresses', None): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['new'].append(eni_pip['private_ip_address']) ips_not_added = [] for private_ip in private_ip_addresses: if private_ip not in ret['changes']['new']: ips_not_added.append(private_ip) # Display results if ips_not_added: ret['result'] = False ret['comment'] = ('ips on eni: {0}\n' 'attempted to add: {1}\n' 'could not add the following ips: {2}\n'.format( '\n\t- 
' + '\n\t- '.join(ret['changes']['new']), '\n\t- ' + '\n\t- '.join(ips_to_add), '\n\t- ' + '\n\t- '.join(ips_not_added))) else: ret['comment'] = "added ips: {0}".format( '\n\t- ' + '\n\t- '.join(ips_to_add)) # Verify there were changes if ret['changes']['old'] == ret['changes']['new']: ret['changes'] = {} else: # Testing mode, show that there were ips to add ret['comment'] = ('ips on eni: {0}\n' 'ips that would be added: {1}\n'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old']), '\n\t- ' + '\n\t- '.join(ips_to_add))) ret['changes'] = {} ret['result'] = None else: ret['comment'] = 'ips on eni: {0}'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old'])) # there were no changes since we did not attempt to remove ips ret['changes'] = {} return ret def private_ips_absent(name, network_interface_name=None, network_interface_id=None, private_ip_addresses=None, region=None, key=None, keyid=None, profile=None): ''' Ensure an ENI does not have secondary private ip addresses associated with it name (String) - State definition name network_interface_id (String) - The EC2 network interface id, example eni-123456789 private_ip_addresses (List or String) - The secondary private ip address(es) that should be absent on the ENI. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' if not salt.utils.data.exactly_one((network_interface_name, network_interface_id)): raise SaltInvocationError("Exactly one of 'network_interface_name', " "'network_interface_id' must be provided") if not private_ip_addresses: raise SaltInvocationError("You must provide the private_ip_addresses to unassociate with " "the ENI") if not isinstance(private_ip_addresses, list): private_ip_addresses = [private_ip_addresses] ret = { 'name': name, 'result': True, 'comment': '', 'changes': {'new': [], 'old': []} } get_eni_args = { 'name': network_interface_name, 'network_interface_id': network_interface_id, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args) # Check if there are any old private ips to remove from the eni primary_private_ip = None if eni and eni.get('result', {}).get('private_ip_addresses'): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['old'].append(eni_pip['private_ip_address']) if eni_pip['primary']: primary_private_ip = eni_pip['private_ip_address'] ips_to_remove = [] for private_ip in private_ip_addresses: if private_ip in ret['changes']['old']: ips_to_remove.append(private_ip) if private_ip == primary_private_ip: ret['result'] = False ret['comment'] = ('You cannot unassign the primary private ip address ({0}) on an ' 'eni\n' 'ips on eni: {1}\n' 'attempted to remove: {2}\n'.format( primary_private_ip, '\n\t- ' + '\n\t- '.join(ret['changes']['old']), '\n\t- ' + '\n\t- '.join(private_ip_addresses))) ret['changes'] = {} return ret if ips_to_remove: if not __opts__['test']: # Unassign secondary private ips to ENI assign_ips_args = { 'network_interface_id': network_interface_id, 'private_ip_addresses': ips_to_remove, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } __salt__['boto_ec2.unassign_private_ip_addresses'](**assign_ips_args) # Verify secondary private ips were properly unassigned from ENI eni = 
__salt__['boto_ec2.get_network_interface'](**get_eni_args) if eni and eni.get('result', {}).get('private_ip_addresses', None): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['new'].append(eni_pip['private_ip_address']) ips_not_removed = [] for private_ip in private_ip_addresses: if private_ip in ret['changes']['new']: ips_not_removed.append(private_ip) if ips_not_removed: ret['result'] = False ret['comment'] = ('ips on eni: {0}\n' 'attempted to remove: {1}\n' 'could not remove the following ips: {2}\n'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['new']), '\n\t- ' + '\n\t- '.join(ips_to_remove), '\n\t- ' + '\n\t- '.join(ips_not_removed))) else: ret['comment'] = "removed ips: {0}".format('\n\t- ' + '\n\t- '.join(ips_to_remove)) # Verify there were changes if ret['changes']['old'] == ret['changes']['new']: ret['changes'] = {} else: # Testing mode, show that there were ips to remove ret['comment'] = ('ips on eni: {0}\n' 'ips that would be removed: {1}\n'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old']), '\n\t- ' + '\n\t- '.join(ips_to_remove))) ret['changes'] = {} ret['result'] = None else: ret['comment'] = 'ips on network interface: {0}'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old'])) # there were no changes since we did not attempt to remove ips ret['changes'] = {} return ret
saltstack/salt
salt/states/boto_ec2.py
snapshot_created
python
def snapshot_created(name, ami_name, instance_name, wait_until_available=True, wait_timeout_seconds=300, **kwargs): ''' Create a snapshot from the given instance .. versionadded:: 2016.3.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } if not __salt__['boto_ec2.create_image'](ami_name=ami_name, instance_name=instance_name, **kwargs): ret['comment'] = 'Failed to create new AMI {ami_name}'.format(ami_name=ami_name) ret['result'] = False return ret ret['comment'] = 'Created new AMI {ami_name}'.format(ami_name=ami_name) ret['changes']['new'] = {ami_name: ami_name} if not wait_until_available: return ret starttime = time() while True: images = __salt__['boto_ec2.find_images'](ami_name=ami_name, return_objs=True, **kwargs) if images and images[0].state == 'available': break if time() - starttime > wait_timeout_seconds: if images: ret['comment'] = 'AMI still in state {state} after timeout'.format(state=images[0].state) else: ret['comment'] = 'AMI with name {ami_name} not found after timeout.'.format(ami_name=ami_name) ret['result'] = False return ret sleep(5) return ret
Create a snapshot from the given instance .. versionadded:: 2016.3.0
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_ec2.py#L550-L586
null
# -*- coding: utf-8 -*- ''' Manage EC2 .. versionadded:: 2015.8.0 This module provides an interface to the Elastic Compute Cloud (EC2) service from AWS. The below code creates a key pair: .. code-block:: yaml create-key-pair: boto_ec2.key_present: - name: mykeypair - save_private: /root/ - region: eu-west-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs .. code-block:: yaml import-key-pair: boto_ec2.key_present: - name: mykeypair - upload_public: 'ssh-rsa AAAA' - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs You can also use salt:// in order to define the public key. .. code-block:: yaml import-key-pair: boto_ec2.key_present: - name: mykeypair - upload_public: salt://mybase/public_key.pub - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs The below code deletes a key pair: .. code-block:: yaml delete-key-pair: boto_ec2.key_absent: - name: mykeypair - region: eu-west-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging from time import time, sleep # Import salt libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin import salt.utils.data import salt.utils.dictupdate as dictupdate from salt.exceptions import SaltInvocationError, CommandExecutionError log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. ''' if 'boto_ec2.get_key' in __salt__: return 'boto_ec2' else: return False def key_present(name, save_private=None, upload_public=None, region=None, key=None, keyid=None, profile=None): ''' Ensure key pair is present. 
''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } exists = __salt__['boto_ec2.get_key'](name, region, key, keyid, profile) log.debug('exists is %s', exists) if upload_public is not None and 'salt://' in upload_public: try: upload_public = __salt__['cp.get_file_str'](upload_public) except IOError as e: log.debug(e) ret['comment'] = 'File {0} not found.'.format(upload_public) ret['result'] = False return ret if not exists: if __opts__['test']: ret['comment'] = 'The key {0} is set to be created.'.format(name) ret['result'] = None return ret if save_private and not upload_public: created = __salt__['boto_ec2.create_key']( name, save_private, region, key, keyid, profile ) if created: ret['result'] = True ret['comment'] = 'The key {0} is created.'.format(name) ret['changes']['new'] = created else: ret['result'] = False ret['comment'] = 'Could not create key {0} '.format(name) elif not save_private and upload_public: imported = __salt__['boto_ec2.import_key'](name, upload_public, region, key, keyid, profile) if imported: ret['result'] = True ret['comment'] = 'The key {0} is created.'.format(name) ret['changes']['old'] = None ret['changes']['new'] = imported else: ret['result'] = False ret['comment'] = 'Could not create key {0} '.format(name) else: ret['result'] = False ret['comment'] = 'You can either upload or download a private key ' else: ret['result'] = True ret['comment'] = 'The key name {0} already exists'.format(name) return ret def key_absent(name, region=None, key=None, keyid=None, profile=None): ''' Deletes a key pair ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } exists = __salt__['boto_ec2.get_key'](name, region, key, keyid, profile) if exists: if __opts__['test']: ret['comment'] = 'The key {0} is set to be deleted.'.format(name) ret['result'] = None return ret deleted = __salt__['boto_ec2.delete_key'](name, region, key, keyid, profile) log.debug('exists is %s', deleted) if deleted: ret['result'] = True 
ret['comment'] = 'The key {0} is deleted.'.format(name) ret['changes']['old'] = name else: ret['result'] = False ret['comment'] = 'Could not delete key {0} '.format(name) else: ret['result'] = True ret['comment'] = 'The key name {0} does not exist'.format(name) return ret def eni_present( name, subnet_id=None, subnet_name=None, private_ip_address=None, description=None, groups=None, source_dest_check=True, allocate_eip=None, arecords=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 ENI exists. .. versionadded:: 2016.3.0 name Name tag associated with the ENI. subnet_id The VPC subnet ID the ENI will exist within. subnet_name The VPC subnet name the ENI will exist within. private_ip_address The private ip address to use for this ENI. If this is not specified AWS will automatically assign a private IP address to the ENI. Must be specified at creation time; will be ignored afterward. description Description of the key. groups A list of security groups to apply to the ENI. source_dest_check Boolean specifying whether source/destination checking is enabled on the ENI. allocate_eip allocate and associate an EIP to the ENI. Could be 'standard' to allocate Elastic IP to EC2 region or 'vpc' to get it for a particular VPC .. versionchanged:: 2016.11.0 arecords A list of arecord dicts with attributes needed for the DNS add_record state. By default the boto_route53.add_record state will be used, which requires: name, zone, ttl, and identifier. See the boto_route53 state for information about these attributes. Other DNS modules can be called by specifying the provider keyword. By default, the private ENI IP address will be used, set 'public: True' in the arecord dict to use the ENI's public IP address .. versionadded:: 2016.3.0 region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' if not salt.utils.data.exactly_one((subnet_id, subnet_name)): raise SaltInvocationError('One (but not both) of subnet_id or ' 'subnet_name must be provided.') if not groups: raise SaltInvocationError('groups is a required argument.') if not isinstance(groups, list): raise SaltInvocationError('groups must be a list.') if not isinstance(source_dest_check, bool): raise SaltInvocationError('source_dest_check must be a bool.') ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} r = __salt__['boto_ec2.get_network_interface']( name=name, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in r: ret['result'] = False ret['comment'] = 'Error when attempting to find eni: {0}.'.format( r['error']['message'] ) return ret if not r['result']: if __opts__['test']: ret['comment'] = 'ENI is set to be created.' if allocate_eip: ret['comment'] = ' '.join([ret['comment'], 'An EIP is set to be allocated/assocaited to the ENI.']) if arecords: ret['comment'] = ' '.join([ret['comment'], 'A records are set to be created.']) ret['result'] = None return ret result_create = __salt__['boto_ec2.create_network_interface']( name, subnet_id=subnet_id, subnet_name=subnet_name, private_ip_address=private_ip_address, description=description, groups=groups, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_create: ret['result'] = False ret['comment'] = 'Failed to create ENI: {0}'.format( result_create['error']['message'] ) return ret r['result'] = result_create['result'] ret['comment'] = 'Created ENI {0}'.format(name) ret['changes']['id'] = r['result']['id'] else: _ret = _eni_attribute( r['result'], 'description', description, region, key, keyid, profile ) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = _ret['comment'] if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _eni_groups( r['result'], groups, region, key, keyid, profile ) ret['changes'] = 
dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret # Actions that need to occur whether creating or updating _ret = _eni_attribute( r['result'], 'source_dest_check', source_dest_check, region, key, keyid, profile ) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] return ret if allocate_eip: if 'allocationId' not in r['result']: if __opts__['test']: ret['comment'] = ' '.join([ret['comment'], 'An EIP is set to be allocated and assocaited to the ENI.']) else: domain = 'vpc' if allocate_eip == 'vpc' else None eip_alloc = __salt__['boto_ec2.allocate_eip_address'](domain=domain, region=region, key=key, keyid=keyid, profile=profile) if eip_alloc: _ret = __salt__['boto_ec2.associate_eip_address'](instance_id=None, instance_name=None, public_ip=None, allocation_id=eip_alloc['allocation_id'], network_interface_id=r['result']['id'], private_ip_address=None, allow_reassociation=False, region=region, key=key, keyid=keyid, profile=profile) if not _ret: _ret = __salt__['boto_ec2.release_eip_address'](public_ip=None, allocation_id=eip_alloc['allocation_id'], region=region, key=key, keyid=keyid, profile=profile) ret['result'] = False msg = 'Failed to assocaite the allocated EIP address with the ENI. The EIP {0}'.format('was successfully released.' if _ret else 'was NOT RELEASED.') ret['comment'] = ' '.join([ret['comment'], msg]) return ret else: ret['result'] = False ret['comment'] = ' '.join([ret['comment'], 'Failed to allocate an EIP address']) return ret else: ret['comment'] = ' '.join([ret['comment'], 'An EIP is already allocated/assocaited to the ENI']) if arecords: for arecord in arecords: if 'name' not in arecord: msg = 'The arecord must contain a "name" property.' 
raise SaltInvocationError(msg) log.debug('processing arecord %s', arecord) _ret = None dns_provider = 'boto_route53' arecord['record_type'] = 'A' public_ip_arecord = False if 'public' in arecord: public_ip_arecord = arecord.pop('public') if public_ip_arecord: if 'publicIp' in r['result']: arecord['value'] = r['result']['publicIp'] elif 'public_ip' in eip_alloc: arecord['value'] = eip_alloc['public_ip'] else: msg = 'Unable to add an A record for the public IP address, a public IP address does not seem to be allocated to this ENI.' raise CommandExecutionError(msg) else: arecord['value'] = r['result']['private_ip_address'] if 'provider' in arecord: dns_provider = arecord.pop('provider') if dns_provider == 'boto_route53': if 'profile' not in arecord: arecord['profile'] = profile if 'key' not in arecord: arecord['key'] = key if 'keyid' not in arecord: arecord['keyid'] = keyid if 'region' not in arecord: arecord['region'] = region _ret = __states__['.'.join([dns_provider, 'present'])](**arecord) log.debug('ret from dns_provider.present = %s', _ret) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret return ret def _eni_attribute(metadata, attr, value, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} if metadata[attr] == value: return ret if __opts__['test']: ret['comment'] = 'ENI set to have {0} updated.'.format(attr) ret['result'] = None return ret result_update = __salt__['boto_ec2.modify_network_interface_attribute']( network_interface_id=metadata['id'], attr=attr, value=value, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_update: msg = 'Failed to update ENI {0}: {1}.' 
ret['result'] = False ret['comment'] = msg.format(attr, result_update['error']['message']) else: ret['comment'] = 'Updated ENI {0}.'.format(attr) ret['changes'][attr] = { 'old': metadata[attr], 'new': value } return ret def _eni_groups(metadata, groups, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} group_ids = [g['id'] for g in metadata['groups']] group_ids.sort() _groups = __salt__['boto_secgroup.convert_to_group_ids']( groups, vpc_id=metadata['vpc_id'], region=region, key=key, keyid=keyid, profile=profile ) if not _groups: ret['comment'] = 'Could not find secgroup ids for provided groups.' ret['result'] = False _groups.sort() if group_ids == _groups: return ret if __opts__['test']: ret['comment'] = 'ENI set to have groups updated.' ret['result'] = None return ret result_update = __salt__['boto_ec2.modify_network_interface_attribute']( network_interface_id=metadata['id'], attr='groups', value=_groups, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_update: msg = 'Failed to update ENI groups: {1}.' ret['result'] = False ret['comment'] = msg.format(result_update['error']['message']) else: ret['comment'] = 'Updated ENI groups.' ret['changes']['groups'] = { 'old': group_ids, 'new': _groups } return ret def eni_absent( name, release_eip=False, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 ENI is absent. .. versionadded:: 2016.3.0 name Name tag associated with the ENI. release_eip True/False - release any EIP associated with the ENI region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} r = __salt__['boto_ec2.get_network_interface']( name=name, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in r: ret['result'] = False ret['comment'] = 'Error when attempting to find eni: {0}.'.format( r['error']['message'] ) return ret if not r['result']: if __opts__['test']: ret['comment'] = 'ENI is set to be deleted.' ret['result'] = None return ret else: if __opts__['test']: ret['comment'] = 'ENI is set to be deleted.' if release_eip and 'allocationId' in r['result']: ret['comment'] = ' '.join([ret['comment'], 'Allocated/associated EIP is set to be released']) ret['result'] = None return ret if 'id' in r['result']['attachment']: result_detach = __salt__['boto_ec2.detach_network_interface']( name=name, force=True, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_detach: ret['result'] = False ret['comment'] = 'Failed to detach ENI: {0}'.format( result_detach['error']['message'] ) return ret # TODO: Ensure the detach occurs before continuing result_delete = __salt__['boto_ec2.delete_network_interface']( name=name, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_delete: ret['result'] = False ret['comment'] = 'Failed to delete ENI: {0}'.format( result_delete['error']['message'] ) return ret ret['comment'] = 'Deleted ENI {0}'.format(name) ret['changes']['id'] = None if release_eip and 'allocationId' in r['result']: _ret = __salt__['boto_ec2.release_eip_address'](public_ip=None, allocation_id=r['result']['allocationId'], region=region, key=key, keyid=keyid, profile=profile) if not _ret: ret['comment'] = ' '.join([ret['comment'], 'Failed to release EIP allocated to the ENI.']) ret['result'] = False return ret else: ret['comment'] = ' '.join([ret['comment'], 'EIP released.']) ret['changes']['eip released'] = True return ret def instance_present(name, instance_name=None, instance_id=None, image_id=None, image_name=None, 
                     tags=None, key_name=None, security_groups=None,
                     user_data=None, instance_type=None, placement=None,
                     kernel_id=None, ramdisk_id=None, vpc_id=None,
                     vpc_name=None, monitoring_enabled=None, subnet_id=None,
                     subnet_name=None, private_ip_address=None,
                     block_device_map=None, disable_api_termination=None,
                     instance_initiated_shutdown_behavior=None,
                     placement_group=None, client_token=None,
                     security_group_ids=None, security_group_names=None,
                     additional_info=None, tenancy=None,
                     instance_profile_arn=None, instance_profile_name=None,
                     ebs_optimized=None, network_interfaces=None,
                     network_interface_name=None, network_interface_id=None,
                     attributes=None, target_state=None, public_ip=None,
                     allocation_id=None, allocate_eip=False,
                     region=None, key=None, keyid=None, profile=None):
    ### TODO - implement 'target_state={running, stopped}'
    '''
    Ensure an EC2 instance is running with the given attributes and state.

    name
        (string) - The name of the state definition.  Recommended that this
        match the instance_name attribute (generally the FQDN of the instance).
    instance_name
        (string) - The name of the instance, generally its FQDN.  Exclusive with
        'instance_id'.
    instance_id
        (string) - The ID of the instance (if known).  Exclusive with
        'instance_name'.
    image_id
        (string) - The ID of the AMI image to run.
    image_name
        (string) - The name of the AMI image to run.
    tags
        (dict) - Tags to apply to the instance.
    key_name
        (string) - The name of the key pair with which to launch instances.
    security_groups
        (list of strings) - The names of the EC2 classic security groups with
        which to associate instances.
    user_data
        (string) - The Base64-encoded MIME user data to be made available to the
        instance(s) in this reservation.
    instance_type
        (string) - The EC2 instance size/type.  Note that only certain types are
        compatible with HVM based AMIs.
    placement
        (string) - The Availability Zone to launch the instance into.
    kernel_id
        (string) - The ID of the kernel with which to launch the instances.
    ramdisk_id
        (string) - The ID of the RAM disk with which to launch the instances.
    vpc_id
        (string) - The ID of a VPC to attach the instance to.
    vpc_name
        (string) - The name of a VPC to attach the instance to.
    monitoring_enabled
        (bool) - Enable detailed CloudWatch monitoring on the instance.
    subnet_id
        (string) - The ID of the subnet within which to launch the instances
        for VPC.
    subnet_name
        (string) - The name of the subnet within which to launch the instances
        for VPC.
    private_ip_address
        (string) - If you're using VPC, you can optionally use this parameter to
        assign the instance a specific available IP address from the subnet
        (e.g., 10.0.0.25).
    block_device_map
        (boto.ec2.blockdevicemapping.BlockDeviceMapping) - A BlockDeviceMapping
        data structure describing the EBS volumes associated with the Image.
    disable_api_termination
        (bool) - If True, the instances will be locked and will not be able to
        be terminated via the API.
    instance_initiated_shutdown_behavior
        (string) - Specifies whether the instance stops or terminates on
        instance-initiated shutdown.  Valid values are:

        - 'stop'
        - 'terminate'
    placement_group
        (string) - If specified, this is the name of the placement group in
        which the instance(s) will be launched.
    client_token
        (string) - Unique, case-sensitive identifier you provide to ensure
        idempotency of the request.  Maximum 64 ASCII characters.
    security_group_ids
        (list of strings) - The IDs of the VPC security groups with which to
        associate instances.
    security_group_names
        (list of strings) - The names of the VPC security groups with which to
        associate instances.
    additional_info
        (string) - Specifies additional information to make available to the
        instance(s).
    tenancy
        (string) - The tenancy of the instance you want to launch.  An instance
        with a tenancy of 'dedicated' runs on single-tenant hardware and can
        only be launched into a VPC.  Valid values are: "default" or
        "dedicated".  NOTE: To use dedicated tenancy you MUST specify a VPC
        subnet-ID as well.
    instance_profile_arn
        (string) - The Amazon resource name (ARN) of the IAM Instance Profile
        (IIP) to associate with the instances.
    instance_profile_name
        (string) - The name of the IAM Instance Profile (IIP) to associate with
        the instances.
    ebs_optimized
        (bool) - Whether the instance is optimized for EBS I/O.  This
        optimization provides dedicated throughput to Amazon EBS and a tuned
        configuration stack to provide optimal EBS I/O performance.  This
        optimization isn't available with all instance types.
    network_interfaces
        (boto.ec2.networkinterface.NetworkInterfaceCollection) - A
        NetworkInterfaceCollection data structure containing the ENI
        specifications for the instance.
    network_interface_name
        (string) - The name of Elastic Network Interface to attach

        .. versionadded:: 2016.11.0
    network_interface_id
        (string) - The id of Elastic Network Interface to attach

        .. versionadded:: 2016.11.0
    attributes
        (dict) - Instance attributes and value to be applied to the instance.
        Available options are:

        - instanceType - A valid instance type (m1.small)
        - kernel - Kernel ID (None)
        - ramdisk - Ramdisk ID (None)
        - userData - Base64 encoded String (None)
        - disableApiTermination - Boolean (true)
        - instanceInitiatedShutdownBehavior - stop|terminate
        - blockDeviceMapping - List of strings - ie: ['/dev/sda=false']
        - sourceDestCheck - Boolean (true)
        - groupSet - Set of Security Groups or IDs
        - ebsOptimized - Boolean (false)
        - sriovNetSupport - String - ie: 'simple'
    target_state
        (string) - The desired target state of the instance.  Available options
        are:

        - running
        - stopped

        Note that this option is currently UNIMPLEMENTED.
    public_ip:
        (string) - The IP of a previously allocated EIP address, which will be
        attached to the instance.  EC2 Classic instances ONLY - for VCP pass in
        an allocation_id instead.
    allocation_id:
        (string) - The ID of a previously allocated EIP address, which will be
        attached to the instance.  VPC instances ONLY - for Classic pass in
        a public_ip instead.
    allocate_eip:
        (bool) - Allocate and attach an EIP on-the-fly for this instance.  Note
        you'll want to releaase this address when terminating the instance,
        either manually or via the 'release_eip' flag to 'instance_absent'.
    region
        (string) - Region to connect to.
    key
        (string) - Secret key to be used.
    keyid
        (string) - Access key to be used.
    profile
        (variable) - A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.

    .. versionadded:: 2016.3.0
    '''
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}
           }
    _create = False
    running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped')
    changed_attrs = {}

    if not salt.utils.data.exactly_one((image_id, image_name)):
        raise SaltInvocationError('Exactly one of image_id OR '
                                  'image_name must be provided.')
    if (public_ip or allocation_id or allocate_eip) and not salt.utils.data.exactly_one((public_ip, allocation_id, allocate_eip)):
        raise SaltInvocationError('At most one of public_ip, allocation_id OR '
                                  'allocate_eip may be provided.')

    # Determine whether the instance already exists (and thus whether we need
    # to create it).  Resolution is by explicit id when given, else by name.
    if instance_id:
        exists = __salt__['boto_ec2.exists'](instance_id=instance_id, region=region, key=key,
                                             keyid=keyid, profile=profile,
                                             in_states=running_states)
        if not exists:
            _create = True
    else:
        instances = __salt__['boto_ec2.find_instances'](name=instance_name if instance_name else name,
                                                        region=region, key=key, keyid=keyid,
                                                        profile=profile, in_states=running_states)
        if not instances:
            _create = True
        elif len(instances) > 1:
            log.debug('Multiple instances matching criteria found - cannot determine a singular instance-id')
            instance_id = None  # No way to know, we'll just have to bail later....
        else:
            instance_id = instances[0]

    if _create:
        if __opts__['test']:
            ret['comment'] = 'The instance {0} is set to be created.'.format(name)
            ret['result'] = None
            return ret
        # Resolve an image name to an AMI id; fall back to using the name
        # verbatim if the lookup returns nothing.
        if image_name:
            args = {'ami_name': image_name, 'region': region, 'key': key,
                    'keyid': keyid, 'profile': profile}
            image_ids = __salt__['boto_ec2.find_images'](**args)
            if image_ids:
                image_id = image_ids[0]
            else:
                image_id = image_name
        r = __salt__['boto_ec2.run'](image_id,
                                     instance_name if instance_name else name,
                                     tags=tags, key_name=key_name,
                                     security_groups=security_groups,
                                     user_data=user_data,
                                     instance_type=instance_type,
                                     placement=placement,
                                     kernel_id=kernel_id,
                                     ramdisk_id=ramdisk_id,
                                     vpc_id=vpc_id, vpc_name=vpc_name,
                                     monitoring_enabled=monitoring_enabled,
                                     subnet_id=subnet_id,
                                     subnet_name=subnet_name,
                                     private_ip_address=private_ip_address,
                                     block_device_map=block_device_map,
                                     disable_api_termination=disable_api_termination,
                                     instance_initiated_shutdown_behavior=instance_initiated_shutdown_behavior,
                                     placement_group=placement_group,
                                     client_token=client_token,
                                     security_group_ids=security_group_ids,
                                     security_group_names=security_group_names,
                                     additional_info=additional_info,
                                     tenancy=tenancy,
                                     instance_profile_arn=instance_profile_arn,
                                     instance_profile_name=instance_profile_name,
                                     ebs_optimized=ebs_optimized,
                                     network_interfaces=network_interfaces,
                                     network_interface_name=network_interface_name,
                                     network_interface_id=network_interface_id,
                                     region=region, key=key, keyid=keyid,
                                     profile=profile)
        if not r or 'instance_id' not in r:
            ret['result'] = False
            ret['comment'] = 'Failed to create instance {0}.'.format(instance_name if instance_name else name)
            return ret

        instance_id = r['instance_id']
        ret['changes'] = {'old': {}, 'new': {}}
        ret['changes']['old']['instance_id'] = None
        ret['changes']['new']['instance_id'] = instance_id

        # To avoid issues we only allocate new EIPs at instance creation.
        # This might miss situations where an instance is initially created
        # created without and one is added later, but the alternative is the
        # risk of EIPs allocated at every state run.
        if allocate_eip:
            if __opts__['test']:
                ret['comment'] = 'New EIP would be allocated.'
                ret['result'] = None
                return ret
            domain = 'vpc' if vpc_id or vpc_name else None
            r = __salt__['boto_ec2.allocate_eip_address'](
                domain=domain, region=region, key=key, keyid=keyid,
                profile=profile)
            if not r:
                ret['result'] = False
                ret['comment'] = 'Failed to allocate new EIP.'
                return ret
            allocation_id = r['allocation_id']
            log.info("New EIP with address %s allocated.", r['public_ip'])
        else:
            log.info("EIP not requested.")

    if public_ip or allocation_id:
        # This can take a bit to show up, give it a chance to...
        tries = 10
        secs = 3
        for t in range(tries):
            r = __salt__['boto_ec2.get_eip_address_info'](
                addresses=public_ip, allocation_ids=allocation_id,
                region=region, key=key, keyid=keyid, profile=profile)
            if r:
                break
            else:
                log.info(
                    'Waiting up to %s secs for new EIP %s to become available',
                    tries * secs, public_ip or allocation_id
                )
                time.sleep(secs)
        if not r:
            ret['result'] = False
            ret['comment'] = 'Failed to lookup EIP {0}.'.format(public_ip or allocation_id)
            return ret
        ip = r[0]['public_ip']
        if r[0].get('instance_id'):
            # EIP already bound somewhere — only acceptable if it's this instance.
            if r[0]['instance_id'] != instance_id:
                ret['result'] = False
                ret['comment'] = ('EIP {0} is already associated with instance '
                                  '{1}.'.format(public_ip if public_ip else allocation_id,
                                                r[0]['instance_id']))
                return ret
        else:
            if __opts__['test']:
                ret['comment'] = 'Instance {0} to be updated.'.format(name)
                ret['result'] = None
                return ret
            r = __salt__['boto_ec2.associate_eip_address'](
                instance_id=instance_id, public_ip=public_ip,
                allocation_id=allocation_id, region=region, key=key,
                keyid=keyid, profile=profile)
            if r:
                if 'new' not in ret['changes']:
                    ret['changes']['new'] = {}
                ret['changes']['new']['public_ip'] = ip
            else:
                ret['result'] = False
                ret['comment'] = 'Failed to attach EIP to instance {0}.'.format(
                    instance_name if instance_name else name)
                return ret

    # Reconcile any requested instance attributes with their current values.
    if attributes:
        for k, v in six.iteritems(attributes):
            curr = __salt__['boto_ec2.get_attribute'](k, instance_id=instance_id,
                                                      region=region, key=key,
                                                      keyid=keyid, profile=profile)
            curr = {} if not isinstance(curr, dict) else curr
            if curr.get(k) == v:
                continue
            else:
                if __opts__['test']:
                    changed_attrs[k] = 'The instance attribute {0} is set to be changed from \'{1}\' to \'{2}\'.'.format(
                        k, curr.get(k), v)
                    continue
                try:
                    r = __salt__['boto_ec2.set_attribute'](attribute=k, attribute_value=v,
                                                           instance_id=instance_id,
                                                           region=region, key=key,
                                                           keyid=keyid, profile=profile)
                except SaltInvocationError as e:
                    ret['result'] = False
                    ret['comment'] = 'Failed to set attribute {0} to {1} on instance {2}.'.format(k, v, instance_name)
                    return ret
                ret['changes'] = ret['changes'] if ret['changes'] else {'old': {}, 'new': {}}
                ret['changes']['old'][k] = curr.get(k)
                ret['changes']['new'][k] = v

    if __opts__['test']:
        if changed_attrs:
            ret['changes']['new'] = changed_attrs
            ret['result'] = None
        else:
            ret['comment'] = 'Instance {0} is in the correct state'.format(instance_name if instance_name else name)
            ret['result'] = True

    # Reconcile tags: delete tags no longer desired, add/replace the rest.
    if tags and instance_id is not None:
        tags = dict(tags)
        curr_tags = dict(__salt__['boto_ec2.get_all_tags'](filters={'resource-id': instance_id},
                                                           region=region, key=key, keyid=keyid,
                                                           profile=profile).get(instance_id, {}))
        current = set(curr_tags.keys())
        desired = set(tags.keys())
        remove = list(current - desired)  # Boto explicitly requires a list here and can't cope with a set...
        add = dict([(t, tags[t]) for t in desired - current])
        replace = dict([(t, tags[t]) for t in tags if tags.get(t) != curr_tags.get(t)])
        # Tag keys are unique despite the bizarre semantics uses which make it
        # LOOK like they could be duplicative.
        add.update(replace)
        if add or remove:
            if __opts__['test']:
                ret['changes']['old'] = ret['changes']['old'] if 'old' in ret['changes'] else {}
                ret['changes']['new'] = ret['changes']['new'] if 'new' in ret['changes'] else {}
                ret['changes']['old']['tags'] = curr_tags
                ret['changes']['new']['tags'] = tags
                ret['comment'] += ' Tags would be updated on instance {0}.'.format(instance_name if
                                                                                   instance_name else name)
            else:
                if remove:
                    if not __salt__['boto_ec2.delete_tags'](resource_ids=instance_id, tags=remove,
                                                            region=region, key=key, keyid=keyid,
                                                            profile=profile):
                        msg = "Error while deleting tags on instance {0}".format(instance_name if
                                                                                 instance_name else name)
                        log.error(msg)
                        ret['comment'] += ' ' + msg
                        ret['result'] = False
                        return ret
                if add:
                    if not __salt__['boto_ec2.create_tags'](resource_ids=instance_id, tags=add,
                                                            region=region, key=key, keyid=keyid,
                                                            profile=profile):
                        msg = "Error while creating tags on instance {0}".format(instance_name if
                                                                                 instance_name else name)
                        log.error(msg)
                        ret['comment'] += ' ' + msg
                        ret['result'] = False
                        return ret
                ret['changes']['old'] = ret['changes']['old'] if 'old' in ret['changes'] else {}
                ret['changes']['new'] = ret['changes']['new'] if 'new' in ret['changes'] else {}
                ret['changes']['old']['tags'] = curr_tags
                ret['changes']['new']['tags'] = tags
    return ret


def instance_absent(name, instance_name=None, instance_id=None,
                    release_eip=False, region=None, key=None, keyid=None,
                    profile=None, filters=None):
    '''
    Ensure an EC2 instance does not exist (is stopped and removed).

    .. versionchanged:: 2016.11.0

    name
        (string) - The name of the state definition.
    instance_name
        (string) - The name of the instance.
    instance_id
        (string) - The ID of the instance.
    release_eip
        (bool) - Release any associated EIPs during termination.
    region
        (string) - Region to connect to.
    key
        (string) - Secret key to be used.
    keyid
        (string) - Access key to be used.
    profile
        (variable) - A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    filters
        (dict) - A dict of additional filters to use in matching the instance to
        delete.

    YAML example fragment:

    .. code-block:: yaml

        - filters:
            vpc-id: vpc-abcdef12
    '''
    ### TODO - Implement 'force' option??  Would automagically turn off
    ### 'disableApiTermination', as needed, before trying to delete.
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}
           }
    running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped')

    # Resolve the instance id from the name if it wasn't given directly.
    if not instance_id:
        try:
            instance_id = __salt__['boto_ec2.get_id'](name=instance_name if instance_name else name,
                                                      region=region, key=key, keyid=keyid,
                                                      profile=profile, in_states=running_states,
                                                      filters=filters)
        except CommandExecutionError as e:
            ret['result'] = None
            ret['comment'] = ("Couldn't determine current status of instance "
                              "{0}.".format(instance_name or name))
            return ret

    instances = __salt__['boto_ec2.find_instances'](instance_id=instance_id, region=region,
                                                    key=key, keyid=keyid, profile=profile,
                                                    return_objs=True, filters=filters)
    if not instances:
        # Nothing matched — already absent, which is the desired state.
        ret['result'] = True
        ret['comment'] = 'Instance {0} is already gone.'.format(instance_id)
        return ret
    instance = instances[0]

    ### Honor 'disableApiTermination' - if you want to override it, first use set_attribute() to turn it off
    no_can_do = __salt__['boto_ec2.get_attribute']('disableApiTermination',
                                                   instance_id=instance_id, region=region,
                                                   key=key, keyid=keyid, profile=profile)
    if no_can_do.get('disableApiTermination') is True:
        ret['result'] = False
        ret['comment'] = 'Termination of instance {0} via the API is disabled.'.format(instance_id)
        return ret

    if __opts__['test']:
        ret['comment'] = 'The instance {0} is set to be deleted.'.format(name)
        ret['result'] = None
        return ret

    r = __salt__['boto_ec2.terminate'](instance_id=instance_id, name=instance_name,
                                       region=region, key=key, keyid=keyid, profile=profile)
    if not r:
        ret['result'] = False
        ret['comment'] = 'Failed to terminate instance {0}.'.format(instance_id)
        return ret

    ret['changes']['old'] = {'instance_id': instance_id}
    ret['changes']['new'] = None

    # Optionally release any EIP bound to the terminated instance.  VPC
    # instances need the allocation/association ids; classic only the address.
    if release_eip:
        ip = getattr(instance, 'ip_address', None)
        if ip:
            base_args = {'region': region, 'key': key, 'keyid': keyid,
                         'profile': profile}
            public_ip = None
            alloc_id = None
            assoc_id = None
            if getattr(instance, 'vpc_id', None):
                r = __salt__['boto_ec2.get_eip_address_info'](addresses=ip, **base_args)
                if r and 'allocation_id' in r[0]:
                    alloc_id = r[0]['allocation_id']
                    assoc_id = r[0].get('association_id')
                else:
                    # I /believe/ this situation is impossible but let's hedge our bets...
                    ret['result'] = False
                    ret['comment'] = "Can't determine AllocationId for address {0}.".format(ip)
                    return ret
            else:
                public_ip = instance.ip_address

            if assoc_id:
                # Race here - sometimes the terminate above will already have dropped this
                if not __salt__['boto_ec2.disassociate_eip_address'](association_id=assoc_id,
                                                                     **base_args):
                    log.warning("Failed to disassociate EIP %s.", ip)

            if __salt__['boto_ec2.release_eip_address'](allocation_id=alloc_id,
                                                        public_ip=public_ip, **base_args):
                log.info("Released EIP address %s", public_ip or r[0]['public_ip'])
                ret['changes']['old']['public_ip'] = public_ip or r[0]['public_ip']
            else:
                ret['result'] = False
                ret['comment'] = "Failed to release EIP {0}.".format(ip)
                return ret

    return ret


def volume_absent(name, volume_name=None, volume_id=None, instance_name=None,
                  instance_id=None, device=None, region=None, key=None,
                  keyid=None, profile=None):
    '''
    Ensure the EC2 volume is detached and absent.

    .. versionadded:: 2016.11.0

    name
        State definition name.

    volume_name
        Name tag associated with the volume. For safety, if this matches more than
        one volume, the state will refuse to apply.

    volume_id
        Resource ID of the volume.

    instance_name
        Only remove volume if it is attached to instance with this Name tag.
        Exclusive with 'instance_id'. Requires 'device'.

    instance_id
        Only remove volume if it is attached to this instance.
        Exclusive with 'instance_name'. Requires 'device'.

    device
        Match by device rather than ID. Requires one of 'instance_name' or
        'instance_id'.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}
           }
    filters = {}
    running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped')

    if not salt.utils.data.exactly_one((volume_name, volume_id, instance_name, instance_id)):
        raise SaltInvocationError("Exactly one of 'volume_name', 'volume_id', "
                                  "'instance_name', or 'instance_id' must be provided.")
    if (instance_name or instance_id) and not device:
        raise SaltInvocationError("Parameter 'device' is required when either "
                                  "'instance_name' or 'instance_id' is specified.")
    # Build the EC2 volume filter set from whichever identifier was given.
    if volume_id:
        filters.update({'volume-id': volume_id})
    if volume_name:
        filters.update({'tag:Name': volume_name})
    if instance_name:
        instance_id = __salt__['boto_ec2.get_id'](
            name=instance_name, region=region, key=key, keyid=keyid,
            profile=profile, in_states=running_states)
        if not instance_id:
            # No such instance means no attached volumes — treat as absent.
            ret['comment'] = ('Instance with Name {0} not found.  Assuming '
                              'associated volumes gone.'.format(instance_name))
            return ret
    if instance_id:
        filters.update({'attachment.instance-id': instance_id})
    if device:
        filters.update({'attachment.device': device})
    args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile}
    vols = __salt__['boto_ec2.get_all_volumes'](filters=filters, **args)
    if not vols:
        ret['comment'] = 'Volume matching criteria not found, assuming already absent'
        return ret
    if len(vols) > 1:
        # Safety: refuse to guess which of several matching volumes to delete.
        msg = "More than one volume matched criteria, can't continue in state {0}".format(name)
        log.error(msg)
        ret['comment'] = msg
        ret['result'] = False
        return ret
    vol = vols[0]
    log.info('Matched Volume ID %s', vol)

    if __opts__['test']:
        ret['comment'] = 'The volume {0} is set to be deleted.'.format(vol)
        ret['result'] = None
        return ret
    if __salt__['boto_ec2.delete_volume'](volume_id=vol, force=True, **args):
        ret['comment'] = 'Volume {0} deleted.'.format(vol)
        ret['changes'] = {'old': {'volume_id': vol}, 'new': {'volume_id': None}}
    else:
        ret['comment'] = 'Error deleting volume {0}.'.format(vol)
        ret['result'] = False
    return ret


def volumes_tagged(name, tag_maps, authoritative=False, region=None, key=None,
                   keyid=None, profile=None):
    '''
    Ensure EC2 volume(s) matching the given filters have the defined tags.

    .. versionadded:: 2016.11.0

    name
        State definition name.

    tag_maps
        List of dicts of filters and tags, where 'filters' is a dict suitable
        for passing to the 'filters' argument of boto_ec2.get_all_volumes(),
        and 'tags' is a dict of tags to be set on volumes as matched by the
        given filters.  The filter syntax is extended to permit passing either
        a list of volume_ids or an instance_name (with instance_name being the
        Name tag of the instance to which the desired volumes are mapped).
        Each mapping in the list is applied separately, so multiple sets of
        volumes can be all tagged differently with one call to this function.

    YAML example fragment:

    ..
code-block:: yaml - filters: attachment.instance_id: i-abcdef12 tags: Name: dev-int-abcdef12.aws-foo.com - filters: attachment.device: /dev/sdf tags: ManagedSnapshots: true BillingGroup: bubba.hotep@aws-foo.com - filters: instance_name: prd-foo-01.aws-foo.com tags: Name: prd-foo-01.aws-foo.com BillingGroup: infra-team@aws-foo.com - filters: volume_ids: [ vol-12345689, vol-abcdef12 ] tags: BillingGroup: infra-team@aws-foo.com authoritative Should un-declared tags currently set on matched volumes be deleted? Boolean. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } args = {'tag_maps': tag_maps, 'authoritative': authoritative, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile} if __opts__['test']: args['dry_run'] = True r = __salt__['boto_ec2.set_volumes_tags'](**args) if r['success']: if r.get('changes'): ret['comment'] = 'Tags would be updated.' ret['changes'] = r['changes'] ret['result'] = None else: ret['comment'] = 'Error validating requested volume tags.' ret['result'] = False return ret r = __salt__['boto_ec2.set_volumes_tags'](**args) if r['success']: if r.get('changes'): ret['comment'] = 'Tags applied.' ret['changes'] = r['changes'] else: ret['comment'] = 'Error updating requested volume tags.' ret['result'] = False return ret def volume_present(name, volume_name=None, volume_id=None, instance_name=None, instance_id=None, device=None, size=None, snapshot_id=None, volume_type=None, iops=None, encrypted=False, kms_key_id=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 volume is present and attached. .. name State definition name. volume_name The Name tag value for the volume. If no volume with that matching name tag is found, a new volume will be created. 
If multiple volumes are matched, the state will fail. volume_id Resource ID of the volume. Exclusive with 'volume_name'. instance_name Attach volume to instance with this Name tag. Exclusive with 'instance_id'. instance_id Attach volume to instance with this ID. Exclusive with 'instance_name'. device The device on the instance through which the volume is exposed (e.g. /dev/sdh) size The size of the new volume, in GiB. If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. snapshot_id The snapshot ID from which the new Volume will be created. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. volume_type The type of the volume. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. Valid volume types for AWS can be found here: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html iops The provisioned IOPS you want to associate with this volume. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. encrypted Specifies whether the volume should be encrypted. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. kms_key_id If encrypted is True, this KMS Key ID may be specified to encrypt volume with this key. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. e.g.: arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} old_dict = {} new_dict = {} running_states = ('running', 'stopped') if not salt.utils.data.exactly_one((volume_name, volume_id)): raise SaltInvocationError("Exactly one of 'volume_name', 'volume_id', " " must be provided.") if not salt.utils.data.exactly_one((instance_name, instance_id)): raise SaltInvocationError("Exactly one of 'instance_name', or 'instance_id'" " must be provided.") if device is None: raise SaltInvocationError("Parameter 'device' is required.") args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile} if instance_name: instance_id = __salt__['boto_ec2.get_id']( name=instance_name, in_states=running_states, **args) if not instance_id: raise SaltInvocationError('Instance with Name {0} not found.'.format(instance_name)) instances = __salt__['boto_ec2.find_instances'](instance_id=instance_id, return_objs=True, **args) instance = instances[0] if volume_name: filters = {} filters.update({'tag:Name': volume_name}) vols = __salt__['boto_ec2.get_all_volumes'](filters=filters, **args) if len(vols) > 1: msg = "More than one volume matched volume name {0}, can't continue in state {1}".format(volume_name, name) raise SaltInvocationError(msg) if not vols: if __opts__['test']: ret['comment'] = ('The volume with name {0} is set to be created and attached' ' on {1}({2}).'.format(volume_name, instance_id, device)) ret['result'] = None return ret _rt = __salt__['boto_ec2.create_volume'](zone_name=instance.placement, size=size, snapshot_id=snapshot_id, volume_type=volume_type, iops=iops, encrypted=encrypted, kms_key_id=kms_key_id, wait_for_creation=True, **args) if 'result' in _rt: volume_id = _rt['result'] else: raise SaltInvocationError('Error creating volume with name {0}.'.format(volume_name)) _rt = __salt__['boto_ec2.set_volumes_tags'](tag_maps=[{ 'filters': {'volume_ids': [volume_id]}, 'tags': {'Name': volume_name} }], **args) if _rt['success'] is False: raise 
SaltInvocationError('Error updating requested volume ' '{0} with name {1}. {2}'.format(volume_id, volume_name, _rt['comment'])) old_dict['volume_id'] = None new_dict['volume_id'] = volume_id else: volume_id = vols[0] vols = __salt__['boto_ec2.get_all_volumes'](volume_ids=[volume_id], return_objs=True, **args) if not vols: raise SaltInvocationError('Volume {0} do not exist'.format(volume_id)) vol = vols[0] if vol.zone != instance.placement: raise SaltInvocationError(('Volume {0} in {1} cannot attach to instance' ' {2} in {3}.').format(volume_id, vol.zone, instance_id, instance.placement)) attach_data = vol.attach_data if attach_data is not None and attach_data.instance_id is not None: if instance_id == attach_data.instance_id and device == attach_data.device: ret['comment'] = 'The volume {0} is attached on {1}({2}).'.format(volume_id, instance_id, device) return ret else: if __opts__['test']: ret['comment'] = ('The volume {0} is set to be detached' ' from {1}({2} and attached on {3}({4}).').format(attach_data.instance_id, attach_data.devic, volume_id, instance_id, device) ret['result'] = None return ret if __salt__['boto_ec2.detach_volume'](volume_id=volume_id, wait_for_detachement=True, **args): ret['comment'] = 'Volume {0} is detached from {1}({2}).'.format(volume_id, attach_data.instance_id, attach_data.device) old_dict['instance_id'] = attach_data.instance_id old_dict['device'] = attach_data.device else: raise SaltInvocationError(('The volume {0} is already attached on instance {1}({2}).' 
' Failed to detach').format(volume_id, attach_data.instance_id, attach_data.device)) else: old_dict['instance_id'] = instance_id old_dict['device'] = None if __opts__['test']: ret['comment'] = 'The volume {0} is set to be attached on {1}({2}).'.format(volume_id, instance_id, device) ret['result'] = None return ret if __salt__['boto_ec2.attach_volume'](volume_id=volume_id, instance_id=instance_id, device=device, **args): ret['comment'] = ' '.join([ ret['comment'], 'Volume {0} is attached on {1}({2}).'.format(volume_id, instance_id, device)]) new_dict['instance_id'] = instance_id new_dict['device'] = device ret['changes'] = {'old': old_dict, 'new': new_dict} else: ret['comment'] = 'Error attaching volume {0} to instance {1}({2}).'.format(volume_id, instance_id, device) ret['result'] = False return ret def private_ips_present(name, network_interface_name=None, network_interface_id=None, private_ip_addresses=None, allow_reassignment=False, region=None, key=None, keyid=None, profile=None): ''' Ensure an ENI has secondary private ip addresses associated with it name (String) - State definition name network_interface_id (String) - The EC2 network interface id, example eni-123456789 private_ip_addresses (List or String) - The secondary private ip address(es) that should be present on the ENI. allow_reassignment (Boolean) - If true, will reassign a secondary private ip address associated with another ENI. If false, state will fail if the secondary private ip address is associated with another ENI. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
    '''
    if not salt.utils.data.exactly_one((network_interface_name,
                                        network_interface_id)):
        raise SaltInvocationError("Exactly one of 'network_interface_name', "
                                  "'network_interface_id' must be provided")

    if not private_ip_addresses:
        raise SaltInvocationError("You must provide the private_ip_addresses to associate with the "
                                  "ENI")

    ret = {
        'name': name,
        'result': True,
        'comment': '',
        'changes': {'old': [], 'new': []}
    }

    get_eni_args = {
        'name': network_interface_name,
        'network_interface_id': network_interface_id,
        'region': region,
        'key': key,
        'keyid': keyid,
        'profile': profile
    }

    eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args)

    # Check if there are any new secondary private ips to add to the eni
    if eni and eni.get('result', {}).get('private_ip_addresses'):
        for eni_pip in eni['result']['private_ip_addresses']:
            ret['changes']['old'].append(eni_pip['private_ip_address'])

    # Desired ips not already present on the ENI.
    ips_to_add = []
    for private_ip in private_ip_addresses:
        if private_ip not in ret['changes']['old']:
            ips_to_add.append(private_ip)

    if ips_to_add:
        if not __opts__['test']:
            # Assign secondary private ips to ENI
            assign_ips_args = {
                'network_interface_id': network_interface_id,
                'private_ip_addresses': ips_to_add,
                'allow_reassignment': allow_reassignment,
                'region': region,
                'key': key,
                'keyid': keyid,
                'profile': profile
            }

            __salt__['boto_ec2.assign_private_ip_addresses'](**assign_ips_args)

            # Verify secondary private ips were properly assigned to ENI
            eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args)
            if eni and eni.get('result', {}).get('private_ip_addresses', None):
                for eni_pip in eni['result']['private_ip_addresses']:
                    ret['changes']['new'].append(eni_pip['private_ip_address'])

            ips_not_added = []
            for private_ip in private_ip_addresses:
                if private_ip not in ret['changes']['new']:
                    ips_not_added.append(private_ip)

            # Display results
            if ips_not_added:
                ret['result'] = False
                ret['comment'] = ('ips on eni: {0}\n'
                                  'attempted to add: {1}\n'
                                  'could not add the following ips: {2}\n'.format(
                                      '\n\t- ' + '\n\t- '.join(ret['changes']['new']),
                                      '\n\t- ' + '\n\t- '.join(ips_to_add),
                                      '\n\t- ' + '\n\t- '.join(ips_not_added)))
            else:
                ret['comment'] = "added ips: {0}".format(
                    '\n\t- ' + '\n\t- '.join(ips_to_add))

            # Verify there were changes
            if ret['changes']['old'] == ret['changes']['new']:
                ret['changes'] = {}
        else:
            # Testing mode, show that there were ips to add
            ret['comment'] = ('ips on eni: {0}\n'
                              'ips that would be added: {1}\n'.format(
                                  '\n\t- ' + '\n\t- '.join(ret['changes']['old']),
                                  '\n\t- ' + '\n\t- '.join(ips_to_add)))
            ret['changes'] = {}
            ret['result'] = None
    else:
        ret['comment'] = 'ips on eni: {0}'.format(
            '\n\t- ' + '\n\t- '.join(ret['changes']['old']))

        # there were no changes since we did not attempt to remove ips
        ret['changes'] = {}

    return ret


def private_ips_absent(name, network_interface_name=None, network_interface_id=None,
                       private_ip_addresses=None, region=None, key=None,
                       keyid=None, profile=None):
    '''
    Ensure an ENI does not have secondary private ip addresses associated with it

    name
        (String) - State definition name
    network_interface_id
        (String) - The EC2 network interface id, example eni-123456789
    private_ip_addresses
        (List or String) - The secondary private ip address(es) that should be
        absent on the ENI.
    region
        (string) - Region to connect to.
    key
        (string) - Secret key to be used.
    keyid
        (string) - Access key to be used.
    profile
        (variable) - A dict with region, key and keyid, or a pillar key
        (string) that contains a dict with region, key and keyid.
''' if not salt.utils.data.exactly_one((network_interface_name, network_interface_id)): raise SaltInvocationError("Exactly one of 'network_interface_name', " "'network_interface_id' must be provided") if not private_ip_addresses: raise SaltInvocationError("You must provide the private_ip_addresses to unassociate with " "the ENI") if not isinstance(private_ip_addresses, list): private_ip_addresses = [private_ip_addresses] ret = { 'name': name, 'result': True, 'comment': '', 'changes': {'new': [], 'old': []} } get_eni_args = { 'name': network_interface_name, 'network_interface_id': network_interface_id, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args) # Check if there are any old private ips to remove from the eni primary_private_ip = None if eni and eni.get('result', {}).get('private_ip_addresses'): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['old'].append(eni_pip['private_ip_address']) if eni_pip['primary']: primary_private_ip = eni_pip['private_ip_address'] ips_to_remove = [] for private_ip in private_ip_addresses: if private_ip in ret['changes']['old']: ips_to_remove.append(private_ip) if private_ip == primary_private_ip: ret['result'] = False ret['comment'] = ('You cannot unassign the primary private ip address ({0}) on an ' 'eni\n' 'ips on eni: {1}\n' 'attempted to remove: {2}\n'.format( primary_private_ip, '\n\t- ' + '\n\t- '.join(ret['changes']['old']), '\n\t- ' + '\n\t- '.join(private_ip_addresses))) ret['changes'] = {} return ret if ips_to_remove: if not __opts__['test']: # Unassign secondary private ips to ENI assign_ips_args = { 'network_interface_id': network_interface_id, 'private_ip_addresses': ips_to_remove, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } __salt__['boto_ec2.unassign_private_ip_addresses'](**assign_ips_args) # Verify secondary private ips were properly unassigned from ENI eni = 
__salt__['boto_ec2.get_network_interface'](**get_eni_args) if eni and eni.get('result', {}).get('private_ip_addresses', None): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['new'].append(eni_pip['private_ip_address']) ips_not_removed = [] for private_ip in private_ip_addresses: if private_ip in ret['changes']['new']: ips_not_removed.append(private_ip) if ips_not_removed: ret['result'] = False ret['comment'] = ('ips on eni: {0}\n' 'attempted to remove: {1}\n' 'could not remove the following ips: {2}\n'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['new']), '\n\t- ' + '\n\t- '.join(ips_to_remove), '\n\t- ' + '\n\t- '.join(ips_not_removed))) else: ret['comment'] = "removed ips: {0}".format('\n\t- ' + '\n\t- '.join(ips_to_remove)) # Verify there were changes if ret['changes']['old'] == ret['changes']['new']: ret['changes'] = {} else: # Testing mode, show that there were ips to remove ret['comment'] = ('ips on eni: {0}\n' 'ips that would be removed: {1}\n'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old']), '\n\t- ' + '\n\t- '.join(ips_to_remove))) ret['changes'] = {} ret['result'] = None else: ret['comment'] = 'ips on network interface: {0}'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old'])) # there were no changes since we did not attempt to remove ips ret['changes'] = {} return ret
saltstack/salt
salt/states/boto_ec2.py
instance_present
python
def instance_present(name, instance_name=None, instance_id=None, image_id=None, image_name=None, tags=None, key_name=None, security_groups=None, user_data=None, instance_type=None, placement=None, kernel_id=None, ramdisk_id=None, vpc_id=None, vpc_name=None, monitoring_enabled=None, subnet_id=None, subnet_name=None, private_ip_address=None, block_device_map=None, disable_api_termination=None, instance_initiated_shutdown_behavior=None, placement_group=None, client_token=None, security_group_ids=None, security_group_names=None, additional_info=None, tenancy=None, instance_profile_arn=None, instance_profile_name=None, ebs_optimized=None, network_interfaces=None, network_interface_name=None, network_interface_id=None, attributes=None, target_state=None, public_ip=None, allocation_id=None, allocate_eip=False, region=None, key=None, keyid=None, profile=None): ### TODO - implement 'target_state={running, stopped}' ''' Ensure an EC2 instance is running with the given attributes and state. name (string) - The name of the state definition. Recommended that this match the instance_name attribute (generally the FQDN of the instance). instance_name (string) - The name of the instance, generally its FQDN. Exclusive with 'instance_id'. instance_id (string) - The ID of the instance (if known). Exclusive with 'instance_name'. image_id (string) – The ID of the AMI image to run. image_name (string) – The name of the AMI image to run. tags (dict) - Tags to apply to the instance. key_name (string) – The name of the key pair with which to launch instances. security_groups (list of strings) – The names of the EC2 classic security groups with which to associate instances user_data (string) – The Base64-encoded MIME user data to be made available to the instance(s) in this reservation. instance_type (string) – The EC2 instance size/type. Note that only certain types are compatible with HVM based AMIs. placement (string) – The Availability Zone to launch the instance into. 
kernel_id (string) – The ID of the kernel with which to launch the instances. ramdisk_id (string) – The ID of the RAM disk with which to launch the instances. vpc_id (string) - The ID of a VPC to attach the instance to. vpc_name (string) - The name of a VPC to attach the instance to. monitoring_enabled (bool) – Enable detailed CloudWatch monitoring on the instance. subnet_id (string) – The ID of the subnet within which to launch the instances for VPC. subnet_name (string) – The name of the subnet within which to launch the instances for VPC. private_ip_address (string) – If you’re using VPC, you can optionally use this parameter to assign the instance a specific available IP address from the subnet (e.g., 10.0.0.25). block_device_map (boto.ec2.blockdevicemapping.BlockDeviceMapping) – A BlockDeviceMapping data structure describing the EBS volumes associated with the Image. disable_api_termination (bool) – If True, the instances will be locked and will not be able to be terminated via the API. instance_initiated_shutdown_behavior (string) – Specifies whether the instance stops or terminates on instance-initiated shutdown. Valid values are: - 'stop' - 'terminate' placement_group (string) – If specified, this is the name of the placement group in which the instance(s) will be launched. client_token (string) – Unique, case-sensitive identifier you provide to ensure idempotency of the request. Maximum 64 ASCII characters. security_group_ids (list of strings) – The IDs of the VPC security groups with which to associate instances. security_group_names (list of strings) – The names of the VPC security groups with which to associate instances. additional_info (string) – Specifies additional information to make available to the instance(s). tenancy (string) – The tenancy of the instance you want to launch. An instance with a tenancy of ‘dedicated’ runs on single-tenant hardware and can only be launched into a VPC. Valid values are:”default” or “dedicated”. 
NOTE: To use dedicated tenancy you MUST specify a VPC subnet-ID as well. instance_profile_arn (string) – The Amazon resource name (ARN) of the IAM Instance Profile (IIP) to associate with the instances. instance_profile_name (string) – The name of the IAM Instance Profile (IIP) to associate with the instances. ebs_optimized (bool) – Whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and a tuned configuration stack to provide optimal EBS I/O performance. This optimization isn’t available with all instance types. network_interfaces (boto.ec2.networkinterface.NetworkInterfaceCollection) – A NetworkInterfaceCollection data structure containing the ENI specifications for the instance. network_interface_name (string) - The name of Elastic Network Interface to attach .. versionadded:: 2016.11.0 network_interface_id (string) - The id of Elastic Network Interface to attach .. versionadded:: 2016.11.0 attributes (dict) - Instance attributes and value to be applied to the instance. Available options are: - instanceType - A valid instance type (m1.small) - kernel - Kernel ID (None) - ramdisk - Ramdisk ID (None) - userData - Base64 encoded String (None) - disableApiTermination - Boolean (true) - instanceInitiatedShutdownBehavior - stop|terminate - blockDeviceMapping - List of strings - ie: [‘/dev/sda=false’] - sourceDestCheck - Boolean (true) - groupSet - Set of Security Groups or IDs - ebsOptimized - Boolean (false) - sriovNetSupport - String - ie: ‘simple’ target_state (string) - The desired target state of the instance. Available options are: - running - stopped Note that this option is currently UNIMPLEMENTED. public_ip: (string) - The IP of a previously allocated EIP address, which will be attached to the instance. EC2 Classic instances ONLY - for VCP pass in an allocation_id instead. allocation_id: (string) - The ID of a previously allocated EIP address, which will be attached to the instance. 
VPC instances ONLY - for Classic pass in a public_ip instead. allocate_eip: (bool) - Allocate and attach an EIP on-the-fly for this instance. Note you'll want to releaase this address when terminating the instance, either manually or via the 'release_eip' flag to 'instance_absent'. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. .. versionadded:: 2016.3.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } _create = False running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') changed_attrs = {} if not salt.utils.data.exactly_one((image_id, image_name)): raise SaltInvocationError('Exactly one of image_id OR ' 'image_name must be provided.') if (public_ip or allocation_id or allocate_eip) and not salt.utils.data.exactly_one((public_ip, allocation_id, allocate_eip)): raise SaltInvocationError('At most one of public_ip, allocation_id OR ' 'allocate_eip may be provided.') if instance_id: exists = __salt__['boto_ec2.exists'](instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) if not exists: _create = True else: instances = __salt__['boto_ec2.find_instances'](name=instance_name if instance_name else name, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) if not instances: _create = True elif len(instances) > 1: log.debug('Multiple instances matching criteria found - cannot determine a singular instance-id') instance_id = None # No way to know, we'll just have to bail later.... 
else: instance_id = instances[0] if _create: if __opts__['test']: ret['comment'] = 'The instance {0} is set to be created.'.format(name) ret['result'] = None return ret if image_name: args = {'ami_name': image_name, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile} image_ids = __salt__['boto_ec2.find_images'](**args) if image_ids: image_id = image_ids[0] else: image_id = image_name r = __salt__['boto_ec2.run'](image_id, instance_name if instance_name else name, tags=tags, key_name=key_name, security_groups=security_groups, user_data=user_data, instance_type=instance_type, placement=placement, kernel_id=kernel_id, ramdisk_id=ramdisk_id, vpc_id=vpc_id, vpc_name=vpc_name, monitoring_enabled=monitoring_enabled, subnet_id=subnet_id, subnet_name=subnet_name, private_ip_address=private_ip_address, block_device_map=block_device_map, disable_api_termination=disable_api_termination, instance_initiated_shutdown_behavior=instance_initiated_shutdown_behavior, placement_group=placement_group, client_token=client_token, security_group_ids=security_group_ids, security_group_names=security_group_names, additional_info=additional_info, tenancy=tenancy, instance_profile_arn=instance_profile_arn, instance_profile_name=instance_profile_name, ebs_optimized=ebs_optimized, network_interfaces=network_interfaces, network_interface_name=network_interface_name, network_interface_id=network_interface_id, region=region, key=key, keyid=keyid, profile=profile) if not r or 'instance_id' not in r: ret['result'] = False ret['comment'] = 'Failed to create instance {0}.'.format(instance_name if instance_name else name) return ret instance_id = r['instance_id'] ret['changes'] = {'old': {}, 'new': {}} ret['changes']['old']['instance_id'] = None ret['changes']['new']['instance_id'] = instance_id # To avoid issues we only allocate new EIPs at instance creation. 
# This might miss situations where an instance is initially created # created without and one is added later, but the alternative is the # risk of EIPs allocated at every state run. if allocate_eip: if __opts__['test']: ret['comment'] = 'New EIP would be allocated.' ret['result'] = None return ret domain = 'vpc' if vpc_id or vpc_name else None r = __salt__['boto_ec2.allocate_eip_address']( domain=domain, region=region, key=key, keyid=keyid, profile=profile) if not r: ret['result'] = False ret['comment'] = 'Failed to allocate new EIP.' return ret allocation_id = r['allocation_id'] log.info("New EIP with address %s allocated.", r['public_ip']) else: log.info("EIP not requested.") if public_ip or allocation_id: # This can take a bit to show up, give it a chance to... tries = 10 secs = 3 for t in range(tries): r = __salt__['boto_ec2.get_eip_address_info']( addresses=public_ip, allocation_ids=allocation_id, region=region, key=key, keyid=keyid, profile=profile) if r: break else: log.info( 'Waiting up to %s secs for new EIP %s to become available', tries * secs, public_ip or allocation_id ) time.sleep(secs) if not r: ret['result'] = False ret['comment'] = 'Failed to lookup EIP {0}.'.format(public_ip or allocation_id) return ret ip = r[0]['public_ip'] if r[0].get('instance_id'): if r[0]['instance_id'] != instance_id: ret['result'] = False ret['comment'] = ('EIP {0} is already associated with instance ' '{1}.'.format(public_ip if public_ip else allocation_id, r[0]['instance_id'])) return ret else: if __opts__['test']: ret['comment'] = 'Instance {0} to be updated.'.format(name) ret['result'] = None return ret r = __salt__['boto_ec2.associate_eip_address']( instance_id=instance_id, public_ip=public_ip, allocation_id=allocation_id, region=region, key=key, keyid=keyid, profile=profile) if r: if 'new' not in ret['changes']: ret['changes']['new'] = {} ret['changes']['new']['public_ip'] = ip else: ret['result'] = False ret['comment'] = 'Failed to attach EIP to instance 
{0}.'.format( instance_name if instance_name else name) return ret if attributes: for k, v in six.iteritems(attributes): curr = __salt__['boto_ec2.get_attribute'](k, instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile) curr = {} if not isinstance(curr, dict) else curr if curr.get(k) == v: continue else: if __opts__['test']: changed_attrs[k] = 'The instance attribute {0} is set to be changed from \'{1}\' to \'{2}\'.'.format( k, curr.get(k), v) continue try: r = __salt__['boto_ec2.set_attribute'](attribute=k, attribute_value=v, instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile) except SaltInvocationError as e: ret['result'] = False ret['comment'] = 'Failed to set attribute {0} to {1} on instance {2}.'.format(k, v, instance_name) return ret ret['changes'] = ret['changes'] if ret['changes'] else {'old': {}, 'new': {}} ret['changes']['old'][k] = curr.get(k) ret['changes']['new'][k] = v if __opts__['test']: if changed_attrs: ret['changes']['new'] = changed_attrs ret['result'] = None else: ret['comment'] = 'Instance {0} is in the correct state'.format(instance_name if instance_name else name) ret['result'] = True if tags and instance_id is not None: tags = dict(tags) curr_tags = dict(__salt__['boto_ec2.get_all_tags'](filters={'resource-id': instance_id}, region=region, key=key, keyid=keyid, profile=profile).get(instance_id, {})) current = set(curr_tags.keys()) desired = set(tags.keys()) remove = list(current - desired) # Boto explicitly requires a list here and can't cope with a set... add = dict([(t, tags[t]) for t in desired - current]) replace = dict([(t, tags[t]) for t in tags if tags.get(t) != curr_tags.get(t)]) # Tag keys are unique despite the bizarre semantics uses which make it LOOK like they could be duplicative. 
add.update(replace) if add or remove: if __opts__['test']: ret['changes']['old'] = ret['changes']['old'] if 'old' in ret['changes'] else {} ret['changes']['new'] = ret['changes']['new'] if 'new' in ret['changes'] else {} ret['changes']['old']['tags'] = curr_tags ret['changes']['new']['tags'] = tags ret['comment'] += ' Tags would be updated on instance {0}.'.format(instance_name if instance_name else name) else: if remove: if not __salt__['boto_ec2.delete_tags'](resource_ids=instance_id, tags=remove, region=region, key=key, keyid=keyid, profile=profile): msg = "Error while deleting tags on instance {0}".format(instance_name if instance_name else name) log.error(msg) ret['comment'] += ' ' + msg ret['result'] = False return ret if add: if not __salt__['boto_ec2.create_tags'](resource_ids=instance_id, tags=add, region=region, key=key, keyid=keyid, profile=profile): msg = "Error while creating tags on instance {0}".format(instance_name if instance_name else name) log.error(msg) ret['comment'] += ' ' + msg ret['result'] = False return ret ret['changes']['old'] = ret['changes']['old'] if 'old' in ret['changes'] else {} ret['changes']['new'] = ret['changes']['new'] if 'new' in ret['changes'] else {} ret['changes']['old']['tags'] = curr_tags ret['changes']['new']['tags'] = tags return ret
Ensure an EC2 instance is running with the given attributes and state. name (string) - The name of the state definition. Recommended that this match the instance_name attribute (generally the FQDN of the instance). instance_name (string) - The name of the instance, generally its FQDN. Exclusive with 'instance_id'. instance_id (string) - The ID of the instance (if known). Exclusive with 'instance_name'. image_id (string) – The ID of the AMI image to run. image_name (string) – The name of the AMI image to run. tags (dict) - Tags to apply to the instance. key_name (string) – The name of the key pair with which to launch instances. security_groups (list of strings) – The names of the EC2 classic security groups with which to associate instances user_data (string) – The Base64-encoded MIME user data to be made available to the instance(s) in this reservation. instance_type (string) – The EC2 instance size/type. Note that only certain types are compatible with HVM based AMIs. placement (string) – The Availability Zone to launch the instance into. kernel_id (string) – The ID of the kernel with which to launch the instances. ramdisk_id (string) – The ID of the RAM disk with which to launch the instances. vpc_id (string) - The ID of a VPC to attach the instance to. vpc_name (string) - The name of a VPC to attach the instance to. monitoring_enabled (bool) – Enable detailed CloudWatch monitoring on the instance. subnet_id (string) – The ID of the subnet within which to launch the instances for VPC. subnet_name (string) – The name of the subnet within which to launch the instances for VPC. private_ip_address (string) – If you’re using VPC, you can optionally use this parameter to assign the instance a specific available IP address from the subnet (e.g., 10.0.0.25). block_device_map (boto.ec2.blockdevicemapping.BlockDeviceMapping) – A BlockDeviceMapping data structure describing the EBS volumes associated with the Image. 
disable_api_termination (bool) – If True, the instances will be locked and will not be able to be terminated via the API. instance_initiated_shutdown_behavior (string) – Specifies whether the instance stops or terminates on instance-initiated shutdown. Valid values are: - 'stop' - 'terminate' placement_group (string) – If specified, this is the name of the placement group in which the instance(s) will be launched. client_token (string) – Unique, case-sensitive identifier you provide to ensure idempotency of the request. Maximum 64 ASCII characters. security_group_ids (list of strings) – The IDs of the VPC security groups with which to associate instances. security_group_names (list of strings) – The names of the VPC security groups with which to associate instances. additional_info (string) – Specifies additional information to make available to the instance(s). tenancy (string) – The tenancy of the instance you want to launch. An instance with a tenancy of ‘dedicated’ runs on single-tenant hardware and can only be launched into a VPC. Valid values are:”default” or “dedicated”. NOTE: To use dedicated tenancy you MUST specify a VPC subnet-ID as well. instance_profile_arn (string) – The Amazon resource name (ARN) of the IAM Instance Profile (IIP) to associate with the instances. instance_profile_name (string) – The name of the IAM Instance Profile (IIP) to associate with the instances. ebs_optimized (bool) – Whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and a tuned configuration stack to provide optimal EBS I/O performance. This optimization isn’t available with all instance types. network_interfaces (boto.ec2.networkinterface.NetworkInterfaceCollection) – A NetworkInterfaceCollection data structure containing the ENI specifications for the instance. network_interface_name (string) - The name of Elastic Network Interface to attach .. 
versionadded:: 2016.11.0 network_interface_id (string) - The id of Elastic Network Interface to attach .. versionadded:: 2016.11.0 attributes (dict) - Instance attributes and value to be applied to the instance. Available options are: - instanceType - A valid instance type (m1.small) - kernel - Kernel ID (None) - ramdisk - Ramdisk ID (None) - userData - Base64 encoded String (None) - disableApiTermination - Boolean (true) - instanceInitiatedShutdownBehavior - stop|terminate - blockDeviceMapping - List of strings - ie: [‘/dev/sda=false’] - sourceDestCheck - Boolean (true) - groupSet - Set of Security Groups or IDs - ebsOptimized - Boolean (false) - sriovNetSupport - String - ie: ‘simple’ target_state (string) - The desired target state of the instance. Available options are: - running - stopped Note that this option is currently UNIMPLEMENTED. public_ip: (string) - The IP of a previously allocated EIP address, which will be attached to the instance. EC2 Classic instances ONLY - for VCP pass in an allocation_id instead. allocation_id: (string) - The ID of a previously allocated EIP address, which will be attached to the instance. VPC instances ONLY - for Classic pass in a public_ip instead. allocate_eip: (bool) - Allocate and attach an EIP on-the-fly for this instance. Note you'll want to releaase this address when terminating the instance, either manually or via the 'release_eip' flag to 'instance_absent'. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. .. versionadded:: 2016.3.0
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_ec2.py#L589-L988
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n" ]
# -*- coding: utf-8 -*- ''' Manage EC2 .. versionadded:: 2015.8.0 This module provides an interface to the Elastic Compute Cloud (EC2) service from AWS. The below code creates a key pair: .. code-block:: yaml create-key-pair: boto_ec2.key_present: - name: mykeypair - save_private: /root/ - region: eu-west-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs .. code-block:: yaml import-key-pair: boto_ec2.key_present: - name: mykeypair - upload_public: 'ssh-rsa AAAA' - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs You can also use salt:// in order to define the public key. .. code-block:: yaml import-key-pair: boto_ec2.key_present: - name: mykeypair - upload_public: salt://mybase/public_key.pub - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs The below code deletes a key pair: .. code-block:: yaml delete-key-pair: boto_ec2.key_absent: - name: mykeypair - region: eu-west-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging from time import time, sleep # Import salt libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin import salt.utils.data import salt.utils.dictupdate as dictupdate from salt.exceptions import SaltInvocationError, CommandExecutionError log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. ''' if 'boto_ec2.get_key' in __salt__: return 'boto_ec2' else: return False def key_present(name, save_private=None, upload_public=None, region=None, key=None, keyid=None, profile=None): ''' Ensure key pair is present. 
''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } exists = __salt__['boto_ec2.get_key'](name, region, key, keyid, profile) log.debug('exists is %s', exists) if upload_public is not None and 'salt://' in upload_public: try: upload_public = __salt__['cp.get_file_str'](upload_public) except IOError as e: log.debug(e) ret['comment'] = 'File {0} not found.'.format(upload_public) ret['result'] = False return ret if not exists: if __opts__['test']: ret['comment'] = 'The key {0} is set to be created.'.format(name) ret['result'] = None return ret if save_private and not upload_public: created = __salt__['boto_ec2.create_key']( name, save_private, region, key, keyid, profile ) if created: ret['result'] = True ret['comment'] = 'The key {0} is created.'.format(name) ret['changes']['new'] = created else: ret['result'] = False ret['comment'] = 'Could not create key {0} '.format(name) elif not save_private and upload_public: imported = __salt__['boto_ec2.import_key'](name, upload_public, region, key, keyid, profile) if imported: ret['result'] = True ret['comment'] = 'The key {0} is created.'.format(name) ret['changes']['old'] = None ret['changes']['new'] = imported else: ret['result'] = False ret['comment'] = 'Could not create key {0} '.format(name) else: ret['result'] = False ret['comment'] = 'You can either upload or download a private key ' else: ret['result'] = True ret['comment'] = 'The key name {0} already exists'.format(name) return ret def key_absent(name, region=None, key=None, keyid=None, profile=None): ''' Deletes a key pair ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } exists = __salt__['boto_ec2.get_key'](name, region, key, keyid, profile) if exists: if __opts__['test']: ret['comment'] = 'The key {0} is set to be deleted.'.format(name) ret['result'] = None return ret deleted = __salt__['boto_ec2.delete_key'](name, region, key, keyid, profile) log.debug('exists is %s', deleted) if deleted: ret['result'] = True 
ret['comment'] = 'The key {0} is deleted.'.format(name) ret['changes']['old'] = name else: ret['result'] = False ret['comment'] = 'Could not delete key {0} '.format(name) else: ret['result'] = True ret['comment'] = 'The key name {0} does not exist'.format(name) return ret def eni_present( name, subnet_id=None, subnet_name=None, private_ip_address=None, description=None, groups=None, source_dest_check=True, allocate_eip=None, arecords=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 ENI exists. .. versionadded:: 2016.3.0 name Name tag associated with the ENI. subnet_id The VPC subnet ID the ENI will exist within. subnet_name The VPC subnet name the ENI will exist within. private_ip_address The private ip address to use for this ENI. If this is not specified AWS will automatically assign a private IP address to the ENI. Must be specified at creation time; will be ignored afterward. description Description of the key. groups A list of security groups to apply to the ENI. source_dest_check Boolean specifying whether source/destination checking is enabled on the ENI. allocate_eip allocate and associate an EIP to the ENI. Could be 'standard' to allocate Elastic IP to EC2 region or 'vpc' to get it for a particular VPC .. versionchanged:: 2016.11.0 arecords A list of arecord dicts with attributes needed for the DNS add_record state. By default the boto_route53.add_record state will be used, which requires: name, zone, ttl, and identifier. See the boto_route53 state for information about these attributes. Other DNS modules can be called by specifying the provider keyword. By default, the private ENI IP address will be used, set 'public: True' in the arecord dict to use the ENI's public IP address .. versionadded:: 2016.3.0 region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' if not salt.utils.data.exactly_one((subnet_id, subnet_name)): raise SaltInvocationError('One (but not both) of subnet_id or ' 'subnet_name must be provided.') if not groups: raise SaltInvocationError('groups is a required argument.') if not isinstance(groups, list): raise SaltInvocationError('groups must be a list.') if not isinstance(source_dest_check, bool): raise SaltInvocationError('source_dest_check must be a bool.') ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} r = __salt__['boto_ec2.get_network_interface']( name=name, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in r: ret['result'] = False ret['comment'] = 'Error when attempting to find eni: {0}.'.format( r['error']['message'] ) return ret if not r['result']: if __opts__['test']: ret['comment'] = 'ENI is set to be created.' if allocate_eip: ret['comment'] = ' '.join([ret['comment'], 'An EIP is set to be allocated/assocaited to the ENI.']) if arecords: ret['comment'] = ' '.join([ret['comment'], 'A records are set to be created.']) ret['result'] = None return ret result_create = __salt__['boto_ec2.create_network_interface']( name, subnet_id=subnet_id, subnet_name=subnet_name, private_ip_address=private_ip_address, description=description, groups=groups, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_create: ret['result'] = False ret['comment'] = 'Failed to create ENI: {0}'.format( result_create['error']['message'] ) return ret r['result'] = result_create['result'] ret['comment'] = 'Created ENI {0}'.format(name) ret['changes']['id'] = r['result']['id'] else: _ret = _eni_attribute( r['result'], 'description', description, region, key, keyid, profile ) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = _ret['comment'] if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _eni_groups( r['result'], groups, region, key, keyid, profile ) ret['changes'] = 
dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret # Actions that need to occur whether creating or updating _ret = _eni_attribute( r['result'], 'source_dest_check', source_dest_check, region, key, keyid, profile ) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] return ret if allocate_eip: if 'allocationId' not in r['result']: if __opts__['test']: ret['comment'] = ' '.join([ret['comment'], 'An EIP is set to be allocated and assocaited to the ENI.']) else: domain = 'vpc' if allocate_eip == 'vpc' else None eip_alloc = __salt__['boto_ec2.allocate_eip_address'](domain=domain, region=region, key=key, keyid=keyid, profile=profile) if eip_alloc: _ret = __salt__['boto_ec2.associate_eip_address'](instance_id=None, instance_name=None, public_ip=None, allocation_id=eip_alloc['allocation_id'], network_interface_id=r['result']['id'], private_ip_address=None, allow_reassociation=False, region=region, key=key, keyid=keyid, profile=profile) if not _ret: _ret = __salt__['boto_ec2.release_eip_address'](public_ip=None, allocation_id=eip_alloc['allocation_id'], region=region, key=key, keyid=keyid, profile=profile) ret['result'] = False msg = 'Failed to assocaite the allocated EIP address with the ENI. The EIP {0}'.format('was successfully released.' if _ret else 'was NOT RELEASED.') ret['comment'] = ' '.join([ret['comment'], msg]) return ret else: ret['result'] = False ret['comment'] = ' '.join([ret['comment'], 'Failed to allocate an EIP address']) return ret else: ret['comment'] = ' '.join([ret['comment'], 'An EIP is already allocated/assocaited to the ENI']) if arecords: for arecord in arecords: if 'name' not in arecord: msg = 'The arecord must contain a "name" property.' 
raise SaltInvocationError(msg) log.debug('processing arecord %s', arecord) _ret = None dns_provider = 'boto_route53' arecord['record_type'] = 'A' public_ip_arecord = False if 'public' in arecord: public_ip_arecord = arecord.pop('public') if public_ip_arecord: if 'publicIp' in r['result']: arecord['value'] = r['result']['publicIp'] elif 'public_ip' in eip_alloc: arecord['value'] = eip_alloc['public_ip'] else: msg = 'Unable to add an A record for the public IP address, a public IP address does not seem to be allocated to this ENI.' raise CommandExecutionError(msg) else: arecord['value'] = r['result']['private_ip_address'] if 'provider' in arecord: dns_provider = arecord.pop('provider') if dns_provider == 'boto_route53': if 'profile' not in arecord: arecord['profile'] = profile if 'key' not in arecord: arecord['key'] = key if 'keyid' not in arecord: arecord['keyid'] = keyid if 'region' not in arecord: arecord['region'] = region _ret = __states__['.'.join([dns_provider, 'present'])](**arecord) log.debug('ret from dns_provider.present = %s', _ret) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret return ret def _eni_attribute(metadata, attr, value, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} if metadata[attr] == value: return ret if __opts__['test']: ret['comment'] = 'ENI set to have {0} updated.'.format(attr) ret['result'] = None return ret result_update = __salt__['boto_ec2.modify_network_interface_attribute']( network_interface_id=metadata['id'], attr=attr, value=value, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_update: msg = 'Failed to update ENI {0}: {1}.' 
ret['result'] = False ret['comment'] = msg.format(attr, result_update['error']['message']) else: ret['comment'] = 'Updated ENI {0}.'.format(attr) ret['changes'][attr] = { 'old': metadata[attr], 'new': value } return ret def _eni_groups(metadata, groups, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} group_ids = [g['id'] for g in metadata['groups']] group_ids.sort() _groups = __salt__['boto_secgroup.convert_to_group_ids']( groups, vpc_id=metadata['vpc_id'], region=region, key=key, keyid=keyid, profile=profile ) if not _groups: ret['comment'] = 'Could not find secgroup ids for provided groups.' ret['result'] = False _groups.sort() if group_ids == _groups: return ret if __opts__['test']: ret['comment'] = 'ENI set to have groups updated.' ret['result'] = None return ret result_update = __salt__['boto_ec2.modify_network_interface_attribute']( network_interface_id=metadata['id'], attr='groups', value=_groups, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_update: msg = 'Failed to update ENI groups: {1}.' ret['result'] = False ret['comment'] = msg.format(result_update['error']['message']) else: ret['comment'] = 'Updated ENI groups.' ret['changes']['groups'] = { 'old': group_ids, 'new': _groups } return ret def eni_absent( name, release_eip=False, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 ENI is absent. .. versionadded:: 2016.3.0 name Name tag associated with the ENI. release_eip True/False - release any EIP associated with the ENI region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} r = __salt__['boto_ec2.get_network_interface']( name=name, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in r: ret['result'] = False ret['comment'] = 'Error when attempting to find eni: {0}.'.format( r['error']['message'] ) return ret if not r['result']: if __opts__['test']: ret['comment'] = 'ENI is set to be deleted.' ret['result'] = None return ret else: if __opts__['test']: ret['comment'] = 'ENI is set to be deleted.' if release_eip and 'allocationId' in r['result']: ret['comment'] = ' '.join([ret['comment'], 'Allocated/associated EIP is set to be released']) ret['result'] = None return ret if 'id' in r['result']['attachment']: result_detach = __salt__['boto_ec2.detach_network_interface']( name=name, force=True, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_detach: ret['result'] = False ret['comment'] = 'Failed to detach ENI: {0}'.format( result_detach['error']['message'] ) return ret # TODO: Ensure the detach occurs before continuing result_delete = __salt__['boto_ec2.delete_network_interface']( name=name, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_delete: ret['result'] = False ret['comment'] = 'Failed to delete ENI: {0}'.format( result_delete['error']['message'] ) return ret ret['comment'] = 'Deleted ENI {0}'.format(name) ret['changes']['id'] = None if release_eip and 'allocationId' in r['result']: _ret = __salt__['boto_ec2.release_eip_address'](public_ip=None, allocation_id=r['result']['allocationId'], region=region, key=key, keyid=keyid, profile=profile) if not _ret: ret['comment'] = ' '.join([ret['comment'], 'Failed to release EIP allocated to the ENI.']) ret['result'] = False return ret else: ret['comment'] = ' '.join([ret['comment'], 'EIP released.']) ret['changes']['eip released'] = True return ret def snapshot_created(name, ami_name, instance_name, wait_until_available=True, wait_timeout_seconds=300, 
**kwargs): ''' Create a snapshot from the given instance .. versionadded:: 2016.3.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } if not __salt__['boto_ec2.create_image'](ami_name=ami_name, instance_name=instance_name, **kwargs): ret['comment'] = 'Failed to create new AMI {ami_name}'.format(ami_name=ami_name) ret['result'] = False return ret ret['comment'] = 'Created new AMI {ami_name}'.format(ami_name=ami_name) ret['changes']['new'] = {ami_name: ami_name} if not wait_until_available: return ret starttime = time() while True: images = __salt__['boto_ec2.find_images'](ami_name=ami_name, return_objs=True, **kwargs) if images and images[0].state == 'available': break if time() - starttime > wait_timeout_seconds: if images: ret['comment'] = 'AMI still in state {state} after timeout'.format(state=images[0].state) else: ret['comment'] = 'AMI with name {ami_name} not found after timeout.'.format(ami_name=ami_name) ret['result'] = False return ret sleep(5) return ret def instance_absent(name, instance_name=None, instance_id=None, release_eip=False, region=None, key=None, keyid=None, profile=None, filters=None): ''' Ensure an EC2 instance does not exist (is stopped and removed). .. versionchanged:: 2016.11.0 name (string) - The name of the state definition. instance_name (string) - The name of the instance. instance_id (string) - The ID of the instance. release_eip (bool) - Release any associated EIPs during termination. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. filters (dict) - A dict of additional filters to use in matching the instance to delete. YAML example fragment: .. code-block:: yaml - filters: vpc-id: vpc-abcdef12 ''' ### TODO - Implement 'force' option?? 
Would automagically turn off ### 'disableApiTermination', as needed, before trying to delete. ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') if not instance_id: try: instance_id = __salt__['boto_ec2.get_id'](name=instance_name if instance_name else name, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states, filters=filters) except CommandExecutionError as e: ret['result'] = None ret['comment'] = ("Couldn't determine current status of instance " "{0}.".format(instance_name or name)) return ret instances = __salt__['boto_ec2.find_instances'](instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile, return_objs=True, filters=filters) if not instances: ret['result'] = True ret['comment'] = 'Instance {0} is already gone.'.format(instance_id) return ret instance = instances[0] ### Honor 'disableApiTermination' - if you want to override it, first use set_attribute() to turn it off no_can_do = __salt__['boto_ec2.get_attribute']('disableApiTermination', instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile) if no_can_do.get('disableApiTermination') is True: ret['result'] = False ret['comment'] = 'Termination of instance {0} via the API is disabled.'.format(instance_id) return ret if __opts__['test']: ret['comment'] = 'The instance {0} is set to be deleted.'.format(name) ret['result'] = None return ret r = __salt__['boto_ec2.terminate'](instance_id=instance_id, name=instance_name, region=region, key=key, keyid=keyid, profile=profile) if not r: ret['result'] = False ret['comment'] = 'Failed to terminate instance {0}.'.format(instance_id) return ret ret['changes']['old'] = {'instance_id': instance_id} ret['changes']['new'] = None if release_eip: ip = getattr(instance, 'ip_address', None) if ip: base_args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile} public_ip = None alloc_id = None 
assoc_id = None if getattr(instance, 'vpc_id', None): r = __salt__['boto_ec2.get_eip_address_info'](addresses=ip, **base_args) if r and 'allocation_id' in r[0]: alloc_id = r[0]['allocation_id'] assoc_id = r[0].get('association_id') else: # I /believe/ this situation is impossible but let's hedge our bets... ret['result'] = False ret['comment'] = "Can't determine AllocationId for address {0}.".format(ip) return ret else: public_ip = instance.ip_address if assoc_id: # Race here - sometimes the terminate above will already have dropped this if not __salt__['boto_ec2.disassociate_eip_address'](association_id=assoc_id, **base_args): log.warning("Failed to disassociate EIP %s.", ip) if __salt__['boto_ec2.release_eip_address'](allocation_id=alloc_id, public_ip=public_ip, **base_args): log.info("Released EIP address %s", public_ip or r[0]['public_ip']) ret['changes']['old']['public_ip'] = public_ip or r[0]['public_ip'] else: ret['result'] = False ret['comment'] = "Failed to release EIP {0}.".format(ip) return ret return ret def volume_absent(name, volume_name=None, volume_id=None, instance_name=None, instance_id=None, device=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 volume is detached and absent. .. versionadded:: 2016.11.0 name State definition name. volume_name Name tag associated with the volume. For safety, if this matches more than one volume, the state will refuse to apply. volume_id Resource ID of the volume. instance_name Only remove volume if it is attached to instance with this Name tag. Exclusive with 'instance_id'. Requires 'device'. instance_id Only remove volume if it is attached to this instance. Exclusive with 'instance_name'. Requires 'device'. device Match by device rather than ID. Requires one of 'instance_name' or 'instance_id'. region Region to connect to. key Secret key to be used. keyid Access key to be used. 
profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } filters = {} running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') if not salt.utils.data.exactly_one((volume_name, volume_id, instance_name, instance_id)): raise SaltInvocationError("Exactly one of 'volume_name', 'volume_id', " "'instance_name', or 'instance_id' must be provided.") if (instance_name or instance_id) and not device: raise SaltInvocationError("Parameter 'device' is required when either " "'instance_name' or 'instance_id' is specified.") if volume_id: filters.update({'volume-id': volume_id}) if volume_name: filters.update({'tag:Name': volume_name}) if instance_name: instance_id = __salt__['boto_ec2.get_id']( name=instance_name, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) if not instance_id: ret['comment'] = ('Instance with Name {0} not found. 
Assuming ' 'associated volumes gone.'.format(instance_name)) return ret if instance_id: filters.update({'attachment.instance-id': instance_id}) if device: filters.update({'attachment.device': device}) args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile} vols = __salt__['boto_ec2.get_all_volumes'](filters=filters, **args) if not vols: ret['comment'] = 'Volume matching criteria not found, assuming already absent' return ret if len(vols) > 1: msg = "More than one volume matched criteria, can't continue in state {0}".format(name) log.error(msg) ret['comment'] = msg ret['result'] = False return ret vol = vols[0] log.info('Matched Volume ID %s', vol) if __opts__['test']: ret['comment'] = 'The volume {0} is set to be deleted.'.format(vol) ret['result'] = None return ret if __salt__['boto_ec2.delete_volume'](volume_id=vol, force=True, **args): ret['comment'] = 'Volume {0} deleted.'.format(vol) ret['changes'] = {'old': {'volume_id': vol}, 'new': {'volume_id': None}} else: ret['comment'] = 'Error deleting volume {0}.'.format(vol) ret['result'] = False return ret def volumes_tagged(name, tag_maps, authoritative=False, region=None, key=None, keyid=None, profile=None): ''' Ensure EC2 volume(s) matching the given filters have the defined tags. .. versionadded:: 2016.11.0 name State definition name. tag_maps List of dicts of filters and tags, where 'filters' is a dict suitable for passing to the 'filters' argument of boto_ec2.get_all_volumes(), and 'tags' is a dict of tags to be set on volumes as matched by the given filters. The filter syntax is extended to permit passing either a list of volume_ids or an instance_name (with instance_name being the Name tag of the instance to which the desired volumes are mapped). Each mapping in the list is applied separately, so multiple sets of volumes can be all tagged differently with one call to this function. YAML example fragment: .. 
code-block:: yaml - filters: attachment.instance_id: i-abcdef12 tags: Name: dev-int-abcdef12.aws-foo.com - filters: attachment.device: /dev/sdf tags: ManagedSnapshots: true BillingGroup: bubba.hotep@aws-foo.com - filters: instance_name: prd-foo-01.aws-foo.com tags: Name: prd-foo-01.aws-foo.com BillingGroup: infra-team@aws-foo.com - filters: volume_ids: [ vol-12345689, vol-abcdef12 ] tags: BillingGroup: infra-team@aws-foo.com authoritative Should un-declared tags currently set on matched volumes be deleted? Boolean. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } args = {'tag_maps': tag_maps, 'authoritative': authoritative, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile} if __opts__['test']: args['dry_run'] = True r = __salt__['boto_ec2.set_volumes_tags'](**args) if r['success']: if r.get('changes'): ret['comment'] = 'Tags would be updated.' ret['changes'] = r['changes'] ret['result'] = None else: ret['comment'] = 'Error validating requested volume tags.' ret['result'] = False return ret r = __salt__['boto_ec2.set_volumes_tags'](**args) if r['success']: if r.get('changes'): ret['comment'] = 'Tags applied.' ret['changes'] = r['changes'] else: ret['comment'] = 'Error updating requested volume tags.' ret['result'] = False return ret def volume_present(name, volume_name=None, volume_id=None, instance_name=None, instance_id=None, device=None, size=None, snapshot_id=None, volume_type=None, iops=None, encrypted=False, kms_key_id=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 volume is present and attached. .. name State definition name. volume_name The Name tag value for the volume. If no volume with that matching name tag is found, a new volume will be created. 
If multiple volumes are matched, the state will fail. volume_id Resource ID of the volume. Exclusive with 'volume_name'. instance_name Attach volume to instance with this Name tag. Exclusive with 'instance_id'. instance_id Attach volume to instance with this ID. Exclusive with 'instance_name'. device The device on the instance through which the volume is exposed (e.g. /dev/sdh) size The size of the new volume, in GiB. If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. snapshot_id The snapshot ID from which the new Volume will be created. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. volume_type The type of the volume. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. Valid volume types for AWS can be found here: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html iops The provisioned IOPS you want to associate with this volume. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. encrypted Specifies whether the volume should be encrypted. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. kms_key_id If encrypted is True, this KMS Key ID may be specified to encrypt volume with this key. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. e.g.: arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} old_dict = {} new_dict = {} running_states = ('running', 'stopped') if not salt.utils.data.exactly_one((volume_name, volume_id)): raise SaltInvocationError("Exactly one of 'volume_name', 'volume_id', " " must be provided.") if not salt.utils.data.exactly_one((instance_name, instance_id)): raise SaltInvocationError("Exactly one of 'instance_name', or 'instance_id'" " must be provided.") if device is None: raise SaltInvocationError("Parameter 'device' is required.") args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile} if instance_name: instance_id = __salt__['boto_ec2.get_id']( name=instance_name, in_states=running_states, **args) if not instance_id: raise SaltInvocationError('Instance with Name {0} not found.'.format(instance_name)) instances = __salt__['boto_ec2.find_instances'](instance_id=instance_id, return_objs=True, **args) instance = instances[0] if volume_name: filters = {} filters.update({'tag:Name': volume_name}) vols = __salt__['boto_ec2.get_all_volumes'](filters=filters, **args) if len(vols) > 1: msg = "More than one volume matched volume name {0}, can't continue in state {1}".format(volume_name, name) raise SaltInvocationError(msg) if not vols: if __opts__['test']: ret['comment'] = ('The volume with name {0} is set to be created and attached' ' on {1}({2}).'.format(volume_name, instance_id, device)) ret['result'] = None return ret _rt = __salt__['boto_ec2.create_volume'](zone_name=instance.placement, size=size, snapshot_id=snapshot_id, volume_type=volume_type, iops=iops, encrypted=encrypted, kms_key_id=kms_key_id, wait_for_creation=True, **args) if 'result' in _rt: volume_id = _rt['result'] else: raise SaltInvocationError('Error creating volume with name {0}.'.format(volume_name)) _rt = __salt__['boto_ec2.set_volumes_tags'](tag_maps=[{ 'filters': {'volume_ids': [volume_id]}, 'tags': {'Name': volume_name} }], **args) if _rt['success'] is False: raise 
SaltInvocationError('Error updating requested volume ' '{0} with name {1}. {2}'.format(volume_id, volume_name, _rt['comment'])) old_dict['volume_id'] = None new_dict['volume_id'] = volume_id else: volume_id = vols[0] vols = __salt__['boto_ec2.get_all_volumes'](volume_ids=[volume_id], return_objs=True, **args) if not vols: raise SaltInvocationError('Volume {0} do not exist'.format(volume_id)) vol = vols[0] if vol.zone != instance.placement: raise SaltInvocationError(('Volume {0} in {1} cannot attach to instance' ' {2} in {3}.').format(volume_id, vol.zone, instance_id, instance.placement)) attach_data = vol.attach_data if attach_data is not None and attach_data.instance_id is not None: if instance_id == attach_data.instance_id and device == attach_data.device: ret['comment'] = 'The volume {0} is attached on {1}({2}).'.format(volume_id, instance_id, device) return ret else: if __opts__['test']: ret['comment'] = ('The volume {0} is set to be detached' ' from {1}({2} and attached on {3}({4}).').format(attach_data.instance_id, attach_data.devic, volume_id, instance_id, device) ret['result'] = None return ret if __salt__['boto_ec2.detach_volume'](volume_id=volume_id, wait_for_detachement=True, **args): ret['comment'] = 'Volume {0} is detached from {1}({2}).'.format(volume_id, attach_data.instance_id, attach_data.device) old_dict['instance_id'] = attach_data.instance_id old_dict['device'] = attach_data.device else: raise SaltInvocationError(('The volume {0} is already attached on instance {1}({2}).' 
' Failed to detach').format(volume_id, attach_data.instance_id, attach_data.device)) else: old_dict['instance_id'] = instance_id old_dict['device'] = None if __opts__['test']: ret['comment'] = 'The volume {0} is set to be attached on {1}({2}).'.format(volume_id, instance_id, device) ret['result'] = None return ret if __salt__['boto_ec2.attach_volume'](volume_id=volume_id, instance_id=instance_id, device=device, **args): ret['comment'] = ' '.join([ ret['comment'], 'Volume {0} is attached on {1}({2}).'.format(volume_id, instance_id, device)]) new_dict['instance_id'] = instance_id new_dict['device'] = device ret['changes'] = {'old': old_dict, 'new': new_dict} else: ret['comment'] = 'Error attaching volume {0} to instance {1}({2}).'.format(volume_id, instance_id, device) ret['result'] = False return ret def private_ips_present(name, network_interface_name=None, network_interface_id=None, private_ip_addresses=None, allow_reassignment=False, region=None, key=None, keyid=None, profile=None): ''' Ensure an ENI has secondary private ip addresses associated with it name (String) - State definition name network_interface_id (String) - The EC2 network interface id, example eni-123456789 private_ip_addresses (List or String) - The secondary private ip address(es) that should be present on the ENI. allow_reassignment (Boolean) - If true, will reassign a secondary private ip address associated with another ENI. If false, state will fail if the secondary private ip address is associated with another ENI. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' if not salt.utils.data.exactly_one((network_interface_name, network_interface_id)): raise SaltInvocationError("Exactly one of 'network_interface_name', " "'network_interface_id' must be provided") if not private_ip_addresses: raise SaltInvocationError("You must provide the private_ip_addresses to associate with the " "ENI") ret = { 'name': name, 'result': True, 'comment': '', 'changes': {'old': [], 'new': []} } get_eni_args = { 'name': network_interface_name, 'network_interface_id': network_interface_id, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args) # Check if there are any new secondary private ips to add to the eni if eni and eni.get('result', {}).get('private_ip_addresses'): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['old'].append(eni_pip['private_ip_address']) ips_to_add = [] for private_ip in private_ip_addresses: if private_ip not in ret['changes']['old']: ips_to_add.append(private_ip) if ips_to_add: if not __opts__['test']: # Assign secondary private ips to ENI assign_ips_args = { 'network_interface_id': network_interface_id, 'private_ip_addresses': ips_to_add, 'allow_reassignment': allow_reassignment, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } __salt__['boto_ec2.assign_private_ip_addresses'](**assign_ips_args) # Verify secondary private ips were properly assigned to ENI eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args) if eni and eni.get('result', {}).get('private_ip_addresses', None): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['new'].append(eni_pip['private_ip_address']) ips_not_added = [] for private_ip in private_ip_addresses: if private_ip not in ret['changes']['new']: ips_not_added.append(private_ip) # Display results if ips_not_added: ret['result'] = False ret['comment'] = ('ips on eni: {0}\n' 'attempted to add: {1}\n' 'could not add the following ips: {2}\n'.format( '\n\t- 
' + '\n\t- '.join(ret['changes']['new']), '\n\t- ' + '\n\t- '.join(ips_to_add), '\n\t- ' + '\n\t- '.join(ips_not_added))) else: ret['comment'] = "added ips: {0}".format( '\n\t- ' + '\n\t- '.join(ips_to_add)) # Verify there were changes if ret['changes']['old'] == ret['changes']['new']: ret['changes'] = {} else: # Testing mode, show that there were ips to add ret['comment'] = ('ips on eni: {0}\n' 'ips that would be added: {1}\n'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old']), '\n\t- ' + '\n\t- '.join(ips_to_add))) ret['changes'] = {} ret['result'] = None else: ret['comment'] = 'ips on eni: {0}'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old'])) # there were no changes since we did not attempt to remove ips ret['changes'] = {} return ret def private_ips_absent(name, network_interface_name=None, network_interface_id=None, private_ip_addresses=None, region=None, key=None, keyid=None, profile=None): ''' Ensure an ENI does not have secondary private ip addresses associated with it name (String) - State definition name network_interface_id (String) - The EC2 network interface id, example eni-123456789 private_ip_addresses (List or String) - The secondary private ip address(es) that should be absent on the ENI. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' if not salt.utils.data.exactly_one((network_interface_name, network_interface_id)): raise SaltInvocationError("Exactly one of 'network_interface_name', " "'network_interface_id' must be provided") if not private_ip_addresses: raise SaltInvocationError("You must provide the private_ip_addresses to unassociate with " "the ENI") if not isinstance(private_ip_addresses, list): private_ip_addresses = [private_ip_addresses] ret = { 'name': name, 'result': True, 'comment': '', 'changes': {'new': [], 'old': []} } get_eni_args = { 'name': network_interface_name, 'network_interface_id': network_interface_id, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args) # Check if there are any old private ips to remove from the eni primary_private_ip = None if eni and eni.get('result', {}).get('private_ip_addresses'): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['old'].append(eni_pip['private_ip_address']) if eni_pip['primary']: primary_private_ip = eni_pip['private_ip_address'] ips_to_remove = [] for private_ip in private_ip_addresses: if private_ip in ret['changes']['old']: ips_to_remove.append(private_ip) if private_ip == primary_private_ip: ret['result'] = False ret['comment'] = ('You cannot unassign the primary private ip address ({0}) on an ' 'eni\n' 'ips on eni: {1}\n' 'attempted to remove: {2}\n'.format( primary_private_ip, '\n\t- ' + '\n\t- '.join(ret['changes']['old']), '\n\t- ' + '\n\t- '.join(private_ip_addresses))) ret['changes'] = {} return ret if ips_to_remove: if not __opts__['test']: # Unassign secondary private ips to ENI assign_ips_args = { 'network_interface_id': network_interface_id, 'private_ip_addresses': ips_to_remove, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } __salt__['boto_ec2.unassign_private_ip_addresses'](**assign_ips_args) # Verify secondary private ips were properly unassigned from ENI eni = 
__salt__['boto_ec2.get_network_interface'](**get_eni_args) if eni and eni.get('result', {}).get('private_ip_addresses', None): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['new'].append(eni_pip['private_ip_address']) ips_not_removed = [] for private_ip in private_ip_addresses: if private_ip in ret['changes']['new']: ips_not_removed.append(private_ip) if ips_not_removed: ret['result'] = False ret['comment'] = ('ips on eni: {0}\n' 'attempted to remove: {1}\n' 'could not remove the following ips: {2}\n'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['new']), '\n\t- ' + '\n\t- '.join(ips_to_remove), '\n\t- ' + '\n\t- '.join(ips_not_removed))) else: ret['comment'] = "removed ips: {0}".format('\n\t- ' + '\n\t- '.join(ips_to_remove)) # Verify there were changes if ret['changes']['old'] == ret['changes']['new']: ret['changes'] = {} else: # Testing mode, show that there were ips to remove ret['comment'] = ('ips on eni: {0}\n' 'ips that would be removed: {1}\n'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old']), '\n\t- ' + '\n\t- '.join(ips_to_remove))) ret['changes'] = {} ret['result'] = None else: ret['comment'] = 'ips on network interface: {0}'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old'])) # there were no changes since we did not attempt to remove ips ret['changes'] = {} return ret
saltstack/salt
salt/states/boto_ec2.py
instance_absent
python
def instance_absent(name, instance_name=None, instance_id=None, release_eip=False, region=None, key=None, keyid=None, profile=None, filters=None): ''' Ensure an EC2 instance does not exist (is stopped and removed). .. versionchanged:: 2016.11.0 name (string) - The name of the state definition. instance_name (string) - The name of the instance. instance_id (string) - The ID of the instance. release_eip (bool) - Release any associated EIPs during termination. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. filters (dict) - A dict of additional filters to use in matching the instance to delete. YAML example fragment: .. code-block:: yaml - filters: vpc-id: vpc-abcdef12 ''' ### TODO - Implement 'force' option?? Would automagically turn off ### 'disableApiTermination', as needed, before trying to delete. 
ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') if not instance_id: try: instance_id = __salt__['boto_ec2.get_id'](name=instance_name if instance_name else name, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states, filters=filters) except CommandExecutionError as e: ret['result'] = None ret['comment'] = ("Couldn't determine current status of instance " "{0}.".format(instance_name or name)) return ret instances = __salt__['boto_ec2.find_instances'](instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile, return_objs=True, filters=filters) if not instances: ret['result'] = True ret['comment'] = 'Instance {0} is already gone.'.format(instance_id) return ret instance = instances[0] ### Honor 'disableApiTermination' - if you want to override it, first use set_attribute() to turn it off no_can_do = __salt__['boto_ec2.get_attribute']('disableApiTermination', instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile) if no_can_do.get('disableApiTermination') is True: ret['result'] = False ret['comment'] = 'Termination of instance {0} via the API is disabled.'.format(instance_id) return ret if __opts__['test']: ret['comment'] = 'The instance {0} is set to be deleted.'.format(name) ret['result'] = None return ret r = __salt__['boto_ec2.terminate'](instance_id=instance_id, name=instance_name, region=region, key=key, keyid=keyid, profile=profile) if not r: ret['result'] = False ret['comment'] = 'Failed to terminate instance {0}.'.format(instance_id) return ret ret['changes']['old'] = {'instance_id': instance_id} ret['changes']['new'] = None if release_eip: ip = getattr(instance, 'ip_address', None) if ip: base_args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile} public_ip = None alloc_id = None assoc_id = None if getattr(instance, 'vpc_id', None): r = 
__salt__['boto_ec2.get_eip_address_info'](addresses=ip, **base_args) if r and 'allocation_id' in r[0]: alloc_id = r[0]['allocation_id'] assoc_id = r[0].get('association_id') else: # I /believe/ this situation is impossible but let's hedge our bets... ret['result'] = False ret['comment'] = "Can't determine AllocationId for address {0}.".format(ip) return ret else: public_ip = instance.ip_address if assoc_id: # Race here - sometimes the terminate above will already have dropped this if not __salt__['boto_ec2.disassociate_eip_address'](association_id=assoc_id, **base_args): log.warning("Failed to disassociate EIP %s.", ip) if __salt__['boto_ec2.release_eip_address'](allocation_id=alloc_id, public_ip=public_ip, **base_args): log.info("Released EIP address %s", public_ip or r[0]['public_ip']) ret['changes']['old']['public_ip'] = public_ip or r[0]['public_ip'] else: ret['result'] = False ret['comment'] = "Failed to release EIP {0}.".format(ip) return ret return ret
Ensure an EC2 instance does not exist (is stopped and removed). .. versionchanged:: 2016.11.0 name (string) - The name of the state definition. instance_name (string) - The name of the instance. instance_id (string) - The ID of the instance. release_eip (bool) - Release any associated EIPs during termination. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. filters (dict) - A dict of additional filters to use in matching the instance to delete. YAML example fragment: .. code-block:: yaml - filters: vpc-id: vpc-abcdef12
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_ec2.py#L991-L1115
null
# -*- coding: utf-8 -*- ''' Manage EC2 .. versionadded:: 2015.8.0 This module provides an interface to the Elastic Compute Cloud (EC2) service from AWS. The below code creates a key pair: .. code-block:: yaml create-key-pair: boto_ec2.key_present: - name: mykeypair - save_private: /root/ - region: eu-west-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs .. code-block:: yaml import-key-pair: boto_ec2.key_present: - name: mykeypair - upload_public: 'ssh-rsa AAAA' - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs You can also use salt:// in order to define the public key. .. code-block:: yaml import-key-pair: boto_ec2.key_present: - name: mykeypair - upload_public: salt://mybase/public_key.pub - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs The below code deletes a key pair: .. code-block:: yaml delete-key-pair: boto_ec2.key_absent: - name: mykeypair - region: eu-west-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging from time import time, sleep # Import salt libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin import salt.utils.data import salt.utils.dictupdate as dictupdate from salt.exceptions import SaltInvocationError, CommandExecutionError log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. ''' if 'boto_ec2.get_key' in __salt__: return 'boto_ec2' else: return False def key_present(name, save_private=None, upload_public=None, region=None, key=None, keyid=None, profile=None): ''' Ensure key pair is present. 
''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } exists = __salt__['boto_ec2.get_key'](name, region, key, keyid, profile) log.debug('exists is %s', exists) if upload_public is not None and 'salt://' in upload_public: try: upload_public = __salt__['cp.get_file_str'](upload_public) except IOError as e: log.debug(e) ret['comment'] = 'File {0} not found.'.format(upload_public) ret['result'] = False return ret if not exists: if __opts__['test']: ret['comment'] = 'The key {0} is set to be created.'.format(name) ret['result'] = None return ret if save_private and not upload_public: created = __salt__['boto_ec2.create_key']( name, save_private, region, key, keyid, profile ) if created: ret['result'] = True ret['comment'] = 'The key {0} is created.'.format(name) ret['changes']['new'] = created else: ret['result'] = False ret['comment'] = 'Could not create key {0} '.format(name) elif not save_private and upload_public: imported = __salt__['boto_ec2.import_key'](name, upload_public, region, key, keyid, profile) if imported: ret['result'] = True ret['comment'] = 'The key {0} is created.'.format(name) ret['changes']['old'] = None ret['changes']['new'] = imported else: ret['result'] = False ret['comment'] = 'Could not create key {0} '.format(name) else: ret['result'] = False ret['comment'] = 'You can either upload or download a private key ' else: ret['result'] = True ret['comment'] = 'The key name {0} already exists'.format(name) return ret def key_absent(name, region=None, key=None, keyid=None, profile=None): ''' Deletes a key pair ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } exists = __salt__['boto_ec2.get_key'](name, region, key, keyid, profile) if exists: if __opts__['test']: ret['comment'] = 'The key {0} is set to be deleted.'.format(name) ret['result'] = None return ret deleted = __salt__['boto_ec2.delete_key'](name, region, key, keyid, profile) log.debug('exists is %s', deleted) if deleted: ret['result'] = True 
ret['comment'] = 'The key {0} is deleted.'.format(name) ret['changes']['old'] = name else: ret['result'] = False ret['comment'] = 'Could not delete key {0} '.format(name) else: ret['result'] = True ret['comment'] = 'The key name {0} does not exist'.format(name) return ret def eni_present( name, subnet_id=None, subnet_name=None, private_ip_address=None, description=None, groups=None, source_dest_check=True, allocate_eip=None, arecords=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 ENI exists. .. versionadded:: 2016.3.0 name Name tag associated with the ENI. subnet_id The VPC subnet ID the ENI will exist within. subnet_name The VPC subnet name the ENI will exist within. private_ip_address The private ip address to use for this ENI. If this is not specified AWS will automatically assign a private IP address to the ENI. Must be specified at creation time; will be ignored afterward. description Description of the key. groups A list of security groups to apply to the ENI. source_dest_check Boolean specifying whether source/destination checking is enabled on the ENI. allocate_eip allocate and associate an EIP to the ENI. Could be 'standard' to allocate Elastic IP to EC2 region or 'vpc' to get it for a particular VPC .. versionchanged:: 2016.11.0 arecords A list of arecord dicts with attributes needed for the DNS add_record state. By default the boto_route53.add_record state will be used, which requires: name, zone, ttl, and identifier. See the boto_route53 state for information about these attributes. Other DNS modules can be called by specifying the provider keyword. By default, the private ENI IP address will be used, set 'public: True' in the arecord dict to use the ENI's public IP address .. versionadded:: 2016.3.0 region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' if not salt.utils.data.exactly_one((subnet_id, subnet_name)): raise SaltInvocationError('One (but not both) of subnet_id or ' 'subnet_name must be provided.') if not groups: raise SaltInvocationError('groups is a required argument.') if not isinstance(groups, list): raise SaltInvocationError('groups must be a list.') if not isinstance(source_dest_check, bool): raise SaltInvocationError('source_dest_check must be a bool.') ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} r = __salt__['boto_ec2.get_network_interface']( name=name, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in r: ret['result'] = False ret['comment'] = 'Error when attempting to find eni: {0}.'.format( r['error']['message'] ) return ret if not r['result']: if __opts__['test']: ret['comment'] = 'ENI is set to be created.' if allocate_eip: ret['comment'] = ' '.join([ret['comment'], 'An EIP is set to be allocated/assocaited to the ENI.']) if arecords: ret['comment'] = ' '.join([ret['comment'], 'A records are set to be created.']) ret['result'] = None return ret result_create = __salt__['boto_ec2.create_network_interface']( name, subnet_id=subnet_id, subnet_name=subnet_name, private_ip_address=private_ip_address, description=description, groups=groups, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_create: ret['result'] = False ret['comment'] = 'Failed to create ENI: {0}'.format( result_create['error']['message'] ) return ret r['result'] = result_create['result'] ret['comment'] = 'Created ENI {0}'.format(name) ret['changes']['id'] = r['result']['id'] else: _ret = _eni_attribute( r['result'], 'description', description, region, key, keyid, profile ) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = _ret['comment'] if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _eni_groups( r['result'], groups, region, key, keyid, profile ) ret['changes'] = 
dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret # Actions that need to occur whether creating or updating _ret = _eni_attribute( r['result'], 'source_dest_check', source_dest_check, region, key, keyid, profile ) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] return ret if allocate_eip: if 'allocationId' not in r['result']: if __opts__['test']: ret['comment'] = ' '.join([ret['comment'], 'An EIP is set to be allocated and assocaited to the ENI.']) else: domain = 'vpc' if allocate_eip == 'vpc' else None eip_alloc = __salt__['boto_ec2.allocate_eip_address'](domain=domain, region=region, key=key, keyid=keyid, profile=profile) if eip_alloc: _ret = __salt__['boto_ec2.associate_eip_address'](instance_id=None, instance_name=None, public_ip=None, allocation_id=eip_alloc['allocation_id'], network_interface_id=r['result']['id'], private_ip_address=None, allow_reassociation=False, region=region, key=key, keyid=keyid, profile=profile) if not _ret: _ret = __salt__['boto_ec2.release_eip_address'](public_ip=None, allocation_id=eip_alloc['allocation_id'], region=region, key=key, keyid=keyid, profile=profile) ret['result'] = False msg = 'Failed to assocaite the allocated EIP address with the ENI. The EIP {0}'.format('was successfully released.' if _ret else 'was NOT RELEASED.') ret['comment'] = ' '.join([ret['comment'], msg]) return ret else: ret['result'] = False ret['comment'] = ' '.join([ret['comment'], 'Failed to allocate an EIP address']) return ret else: ret['comment'] = ' '.join([ret['comment'], 'An EIP is already allocated/assocaited to the ENI']) if arecords: for arecord in arecords: if 'name' not in arecord: msg = 'The arecord must contain a "name" property.' 
raise SaltInvocationError(msg) log.debug('processing arecord %s', arecord) _ret = None dns_provider = 'boto_route53' arecord['record_type'] = 'A' public_ip_arecord = False if 'public' in arecord: public_ip_arecord = arecord.pop('public') if public_ip_arecord: if 'publicIp' in r['result']: arecord['value'] = r['result']['publicIp'] elif 'public_ip' in eip_alloc: arecord['value'] = eip_alloc['public_ip'] else: msg = 'Unable to add an A record for the public IP address, a public IP address does not seem to be allocated to this ENI.' raise CommandExecutionError(msg) else: arecord['value'] = r['result']['private_ip_address'] if 'provider' in arecord: dns_provider = arecord.pop('provider') if dns_provider == 'boto_route53': if 'profile' not in arecord: arecord['profile'] = profile if 'key' not in arecord: arecord['key'] = key if 'keyid' not in arecord: arecord['keyid'] = keyid if 'region' not in arecord: arecord['region'] = region _ret = __states__['.'.join([dns_provider, 'present'])](**arecord) log.debug('ret from dns_provider.present = %s', _ret) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret return ret def _eni_attribute(metadata, attr, value, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} if metadata[attr] == value: return ret if __opts__['test']: ret['comment'] = 'ENI set to have {0} updated.'.format(attr) ret['result'] = None return ret result_update = __salt__['boto_ec2.modify_network_interface_attribute']( network_interface_id=metadata['id'], attr=attr, value=value, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_update: msg = 'Failed to update ENI {0}: {1}.' 
ret['result'] = False ret['comment'] = msg.format(attr, result_update['error']['message']) else: ret['comment'] = 'Updated ENI {0}.'.format(attr) ret['changes'][attr] = { 'old': metadata[attr], 'new': value } return ret def _eni_groups(metadata, groups, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} group_ids = [g['id'] for g in metadata['groups']] group_ids.sort() _groups = __salt__['boto_secgroup.convert_to_group_ids']( groups, vpc_id=metadata['vpc_id'], region=region, key=key, keyid=keyid, profile=profile ) if not _groups: ret['comment'] = 'Could not find secgroup ids for provided groups.' ret['result'] = False _groups.sort() if group_ids == _groups: return ret if __opts__['test']: ret['comment'] = 'ENI set to have groups updated.' ret['result'] = None return ret result_update = __salt__['boto_ec2.modify_network_interface_attribute']( network_interface_id=metadata['id'], attr='groups', value=_groups, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_update: msg = 'Failed to update ENI groups: {1}.' ret['result'] = False ret['comment'] = msg.format(result_update['error']['message']) else: ret['comment'] = 'Updated ENI groups.' ret['changes']['groups'] = { 'old': group_ids, 'new': _groups } return ret def eni_absent( name, release_eip=False, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 ENI is absent. .. versionadded:: 2016.3.0 name Name tag associated with the ENI. release_eip True/False - release any EIP associated with the ENI region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} r = __salt__['boto_ec2.get_network_interface']( name=name, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in r: ret['result'] = False ret['comment'] = 'Error when attempting to find eni: {0}.'.format( r['error']['message'] ) return ret if not r['result']: if __opts__['test']: ret['comment'] = 'ENI is set to be deleted.' ret['result'] = None return ret else: if __opts__['test']: ret['comment'] = 'ENI is set to be deleted.' if release_eip and 'allocationId' in r['result']: ret['comment'] = ' '.join([ret['comment'], 'Allocated/associated EIP is set to be released']) ret['result'] = None return ret if 'id' in r['result']['attachment']: result_detach = __salt__['boto_ec2.detach_network_interface']( name=name, force=True, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_detach: ret['result'] = False ret['comment'] = 'Failed to detach ENI: {0}'.format( result_detach['error']['message'] ) return ret # TODO: Ensure the detach occurs before continuing result_delete = __salt__['boto_ec2.delete_network_interface']( name=name, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_delete: ret['result'] = False ret['comment'] = 'Failed to delete ENI: {0}'.format( result_delete['error']['message'] ) return ret ret['comment'] = 'Deleted ENI {0}'.format(name) ret['changes']['id'] = None if release_eip and 'allocationId' in r['result']: _ret = __salt__['boto_ec2.release_eip_address'](public_ip=None, allocation_id=r['result']['allocationId'], region=region, key=key, keyid=keyid, profile=profile) if not _ret: ret['comment'] = ' '.join([ret['comment'], 'Failed to release EIP allocated to the ENI.']) ret['result'] = False return ret else: ret['comment'] = ' '.join([ret['comment'], 'EIP released.']) ret['changes']['eip released'] = True return ret def snapshot_created(name, ami_name, instance_name, wait_until_available=True, wait_timeout_seconds=300, 
**kwargs): ''' Create a snapshot from the given instance .. versionadded:: 2016.3.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } if not __salt__['boto_ec2.create_image'](ami_name=ami_name, instance_name=instance_name, **kwargs): ret['comment'] = 'Failed to create new AMI {ami_name}'.format(ami_name=ami_name) ret['result'] = False return ret ret['comment'] = 'Created new AMI {ami_name}'.format(ami_name=ami_name) ret['changes']['new'] = {ami_name: ami_name} if not wait_until_available: return ret starttime = time() while True: images = __salt__['boto_ec2.find_images'](ami_name=ami_name, return_objs=True, **kwargs) if images and images[0].state == 'available': break if time() - starttime > wait_timeout_seconds: if images: ret['comment'] = 'AMI still in state {state} after timeout'.format(state=images[0].state) else: ret['comment'] = 'AMI with name {ami_name} not found after timeout.'.format(ami_name=ami_name) ret['result'] = False return ret sleep(5) return ret def instance_present(name, instance_name=None, instance_id=None, image_id=None, image_name=None, tags=None, key_name=None, security_groups=None, user_data=None, instance_type=None, placement=None, kernel_id=None, ramdisk_id=None, vpc_id=None, vpc_name=None, monitoring_enabled=None, subnet_id=None, subnet_name=None, private_ip_address=None, block_device_map=None, disable_api_termination=None, instance_initiated_shutdown_behavior=None, placement_group=None, client_token=None, security_group_ids=None, security_group_names=None, additional_info=None, tenancy=None, instance_profile_arn=None, instance_profile_name=None, ebs_optimized=None, network_interfaces=None, network_interface_name=None, network_interface_id=None, attributes=None, target_state=None, public_ip=None, allocation_id=None, allocate_eip=False, region=None, key=None, keyid=None, profile=None): ### TODO - implement 'target_state={running, stopped}' ''' Ensure an EC2 instance is running with the given attributes and state. 
name (string) - The name of the state definition. Recommended that this match the instance_name attribute (generally the FQDN of the instance). instance_name (string) - The name of the instance, generally its FQDN. Exclusive with 'instance_id'. instance_id (string) - The ID of the instance (if known). Exclusive with 'instance_name'. image_id (string) – The ID of the AMI image to run. image_name (string) – The name of the AMI image to run. tags (dict) - Tags to apply to the instance. key_name (string) – The name of the key pair with which to launch instances. security_groups (list of strings) – The names of the EC2 classic security groups with which to associate instances user_data (string) – The Base64-encoded MIME user data to be made available to the instance(s) in this reservation. instance_type (string) – The EC2 instance size/type. Note that only certain types are compatible with HVM based AMIs. placement (string) – The Availability Zone to launch the instance into. kernel_id (string) – The ID of the kernel with which to launch the instances. ramdisk_id (string) – The ID of the RAM disk with which to launch the instances. vpc_id (string) - The ID of a VPC to attach the instance to. vpc_name (string) - The name of a VPC to attach the instance to. monitoring_enabled (bool) – Enable detailed CloudWatch monitoring on the instance. subnet_id (string) – The ID of the subnet within which to launch the instances for VPC. subnet_name (string) – The name of the subnet within which to launch the instances for VPC. private_ip_address (string) – If you’re using VPC, you can optionally use this parameter to assign the instance a specific available IP address from the subnet (e.g., 10.0.0.25). block_device_map (boto.ec2.blockdevicemapping.BlockDeviceMapping) – A BlockDeviceMapping data structure describing the EBS volumes associated with the Image. disable_api_termination (bool) – If True, the instances will be locked and will not be able to be terminated via the API. 
instance_initiated_shutdown_behavior (string) – Specifies whether the instance stops or terminates on instance-initiated shutdown. Valid values are: - 'stop' - 'terminate' placement_group (string) – If specified, this is the name of the placement group in which the instance(s) will be launched. client_token (string) – Unique, case-sensitive identifier you provide to ensure idempotency of the request. Maximum 64 ASCII characters. security_group_ids (list of strings) – The IDs of the VPC security groups with which to associate instances. security_group_names (list of strings) – The names of the VPC security groups with which to associate instances. additional_info (string) – Specifies additional information to make available to the instance(s). tenancy (string) – The tenancy of the instance you want to launch. An instance with a tenancy of ‘dedicated’ runs on single-tenant hardware and can only be launched into a VPC. Valid values are:”default” or “dedicated”. NOTE: To use dedicated tenancy you MUST specify a VPC subnet-ID as well. instance_profile_arn (string) – The Amazon resource name (ARN) of the IAM Instance Profile (IIP) to associate with the instances. instance_profile_name (string) – The name of the IAM Instance Profile (IIP) to associate with the instances. ebs_optimized (bool) – Whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and a tuned configuration stack to provide optimal EBS I/O performance. This optimization isn’t available with all instance types. network_interfaces (boto.ec2.networkinterface.NetworkInterfaceCollection) – A NetworkInterfaceCollection data structure containing the ENI specifications for the instance. network_interface_name (string) - The name of Elastic Network Interface to attach .. versionadded:: 2016.11.0 network_interface_id (string) - The id of Elastic Network Interface to attach .. 
versionadded:: 2016.11.0 attributes (dict) - Instance attributes and value to be applied to the instance. Available options are: - instanceType - A valid instance type (m1.small) - kernel - Kernel ID (None) - ramdisk - Ramdisk ID (None) - userData - Base64 encoded String (None) - disableApiTermination - Boolean (true) - instanceInitiatedShutdownBehavior - stop|terminate - blockDeviceMapping - List of strings - ie: [‘/dev/sda=false’] - sourceDestCheck - Boolean (true) - groupSet - Set of Security Groups or IDs - ebsOptimized - Boolean (false) - sriovNetSupport - String - ie: ‘simple’ target_state (string) - The desired target state of the instance. Available options are: - running - stopped Note that this option is currently UNIMPLEMENTED. public_ip: (string) - The IP of a previously allocated EIP address, which will be attached to the instance. EC2 Classic instances ONLY - for VCP pass in an allocation_id instead. allocation_id: (string) - The ID of a previously allocated EIP address, which will be attached to the instance. VPC instances ONLY - for Classic pass in a public_ip instead. allocate_eip: (bool) - Allocate and attach an EIP on-the-fly for this instance. Note you'll want to releaase this address when terminating the instance, either manually or via the 'release_eip' flag to 'instance_absent'. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. .. 
versionadded:: 2016.3.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } _create = False running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') changed_attrs = {} if not salt.utils.data.exactly_one((image_id, image_name)): raise SaltInvocationError('Exactly one of image_id OR ' 'image_name must be provided.') if (public_ip or allocation_id or allocate_eip) and not salt.utils.data.exactly_one((public_ip, allocation_id, allocate_eip)): raise SaltInvocationError('At most one of public_ip, allocation_id OR ' 'allocate_eip may be provided.') if instance_id: exists = __salt__['boto_ec2.exists'](instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) if not exists: _create = True else: instances = __salt__['boto_ec2.find_instances'](name=instance_name if instance_name else name, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) if not instances: _create = True elif len(instances) > 1: log.debug('Multiple instances matching criteria found - cannot determine a singular instance-id') instance_id = None # No way to know, we'll just have to bail later.... 
else: instance_id = instances[0] if _create: if __opts__['test']: ret['comment'] = 'The instance {0} is set to be created.'.format(name) ret['result'] = None return ret if image_name: args = {'ami_name': image_name, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile} image_ids = __salt__['boto_ec2.find_images'](**args) if image_ids: image_id = image_ids[0] else: image_id = image_name r = __salt__['boto_ec2.run'](image_id, instance_name if instance_name else name, tags=tags, key_name=key_name, security_groups=security_groups, user_data=user_data, instance_type=instance_type, placement=placement, kernel_id=kernel_id, ramdisk_id=ramdisk_id, vpc_id=vpc_id, vpc_name=vpc_name, monitoring_enabled=monitoring_enabled, subnet_id=subnet_id, subnet_name=subnet_name, private_ip_address=private_ip_address, block_device_map=block_device_map, disable_api_termination=disable_api_termination, instance_initiated_shutdown_behavior=instance_initiated_shutdown_behavior, placement_group=placement_group, client_token=client_token, security_group_ids=security_group_ids, security_group_names=security_group_names, additional_info=additional_info, tenancy=tenancy, instance_profile_arn=instance_profile_arn, instance_profile_name=instance_profile_name, ebs_optimized=ebs_optimized, network_interfaces=network_interfaces, network_interface_name=network_interface_name, network_interface_id=network_interface_id, region=region, key=key, keyid=keyid, profile=profile) if not r or 'instance_id' not in r: ret['result'] = False ret['comment'] = 'Failed to create instance {0}.'.format(instance_name if instance_name else name) return ret instance_id = r['instance_id'] ret['changes'] = {'old': {}, 'new': {}} ret['changes']['old']['instance_id'] = None ret['changes']['new']['instance_id'] = instance_id # To avoid issues we only allocate new EIPs at instance creation. 
# This might miss situations where an instance is initially created # created without and one is added later, but the alternative is the # risk of EIPs allocated at every state run. if allocate_eip: if __opts__['test']: ret['comment'] = 'New EIP would be allocated.' ret['result'] = None return ret domain = 'vpc' if vpc_id or vpc_name else None r = __salt__['boto_ec2.allocate_eip_address']( domain=domain, region=region, key=key, keyid=keyid, profile=profile) if not r: ret['result'] = False ret['comment'] = 'Failed to allocate new EIP.' return ret allocation_id = r['allocation_id'] log.info("New EIP with address %s allocated.", r['public_ip']) else: log.info("EIP not requested.") if public_ip or allocation_id: # This can take a bit to show up, give it a chance to... tries = 10 secs = 3 for t in range(tries): r = __salt__['boto_ec2.get_eip_address_info']( addresses=public_ip, allocation_ids=allocation_id, region=region, key=key, keyid=keyid, profile=profile) if r: break else: log.info( 'Waiting up to %s secs for new EIP %s to become available', tries * secs, public_ip or allocation_id ) time.sleep(secs) if not r: ret['result'] = False ret['comment'] = 'Failed to lookup EIP {0}.'.format(public_ip or allocation_id) return ret ip = r[0]['public_ip'] if r[0].get('instance_id'): if r[0]['instance_id'] != instance_id: ret['result'] = False ret['comment'] = ('EIP {0} is already associated with instance ' '{1}.'.format(public_ip if public_ip else allocation_id, r[0]['instance_id'])) return ret else: if __opts__['test']: ret['comment'] = 'Instance {0} to be updated.'.format(name) ret['result'] = None return ret r = __salt__['boto_ec2.associate_eip_address']( instance_id=instance_id, public_ip=public_ip, allocation_id=allocation_id, region=region, key=key, keyid=keyid, profile=profile) if r: if 'new' not in ret['changes']: ret['changes']['new'] = {} ret['changes']['new']['public_ip'] = ip else: ret['result'] = False ret['comment'] = 'Failed to attach EIP to instance 
{0}.'.format( instance_name if instance_name else name) return ret if attributes: for k, v in six.iteritems(attributes): curr = __salt__['boto_ec2.get_attribute'](k, instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile) curr = {} if not isinstance(curr, dict) else curr if curr.get(k) == v: continue else: if __opts__['test']: changed_attrs[k] = 'The instance attribute {0} is set to be changed from \'{1}\' to \'{2}\'.'.format( k, curr.get(k), v) continue try: r = __salt__['boto_ec2.set_attribute'](attribute=k, attribute_value=v, instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile) except SaltInvocationError as e: ret['result'] = False ret['comment'] = 'Failed to set attribute {0} to {1} on instance {2}.'.format(k, v, instance_name) return ret ret['changes'] = ret['changes'] if ret['changes'] else {'old': {}, 'new': {}} ret['changes']['old'][k] = curr.get(k) ret['changes']['new'][k] = v if __opts__['test']: if changed_attrs: ret['changes']['new'] = changed_attrs ret['result'] = None else: ret['comment'] = 'Instance {0} is in the correct state'.format(instance_name if instance_name else name) ret['result'] = True if tags and instance_id is not None: tags = dict(tags) curr_tags = dict(__salt__['boto_ec2.get_all_tags'](filters={'resource-id': instance_id}, region=region, key=key, keyid=keyid, profile=profile).get(instance_id, {})) current = set(curr_tags.keys()) desired = set(tags.keys()) remove = list(current - desired) # Boto explicitly requires a list here and can't cope with a set... add = dict([(t, tags[t]) for t in desired - current]) replace = dict([(t, tags[t]) for t in tags if tags.get(t) != curr_tags.get(t)]) # Tag keys are unique despite the bizarre semantics uses which make it LOOK like they could be duplicative. 
add.update(replace) if add or remove: if __opts__['test']: ret['changes']['old'] = ret['changes']['old'] if 'old' in ret['changes'] else {} ret['changes']['new'] = ret['changes']['new'] if 'new' in ret['changes'] else {} ret['changes']['old']['tags'] = curr_tags ret['changes']['new']['tags'] = tags ret['comment'] += ' Tags would be updated on instance {0}.'.format(instance_name if instance_name else name) else: if remove: if not __salt__['boto_ec2.delete_tags'](resource_ids=instance_id, tags=remove, region=region, key=key, keyid=keyid, profile=profile): msg = "Error while deleting tags on instance {0}".format(instance_name if instance_name else name) log.error(msg) ret['comment'] += ' ' + msg ret['result'] = False return ret if add: if not __salt__['boto_ec2.create_tags'](resource_ids=instance_id, tags=add, region=region, key=key, keyid=keyid, profile=profile): msg = "Error while creating tags on instance {0}".format(instance_name if instance_name else name) log.error(msg) ret['comment'] += ' ' + msg ret['result'] = False return ret ret['changes']['old'] = ret['changes']['old'] if 'old' in ret['changes'] else {} ret['changes']['new'] = ret['changes']['new'] if 'new' in ret['changes'] else {} ret['changes']['old']['tags'] = curr_tags ret['changes']['new']['tags'] = tags return ret def volume_absent(name, volume_name=None, volume_id=None, instance_name=None, instance_id=None, device=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 volume is detached and absent. .. versionadded:: 2016.11.0 name State definition name. volume_name Name tag associated with the volume. For safety, if this matches more than one volume, the state will refuse to apply. volume_id Resource ID of the volume. instance_name Only remove volume if it is attached to instance with this Name tag. Exclusive with 'instance_id'. Requires 'device'. instance_id Only remove volume if it is attached to this instance. Exclusive with 'instance_name'. Requires 'device'. 
device Match by device rather than ID. Requires one of 'instance_name' or 'instance_id'. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } filters = {} running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') if not salt.utils.data.exactly_one((volume_name, volume_id, instance_name, instance_id)): raise SaltInvocationError("Exactly one of 'volume_name', 'volume_id', " "'instance_name', or 'instance_id' must be provided.") if (instance_name or instance_id) and not device: raise SaltInvocationError("Parameter 'device' is required when either " "'instance_name' or 'instance_id' is specified.") if volume_id: filters.update({'volume-id': volume_id}) if volume_name: filters.update({'tag:Name': volume_name}) if instance_name: instance_id = __salt__['boto_ec2.get_id']( name=instance_name, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) if not instance_id: ret['comment'] = ('Instance with Name {0} not found. 
Assuming ' 'associated volumes gone.'.format(instance_name)) return ret if instance_id: filters.update({'attachment.instance-id': instance_id}) if device: filters.update({'attachment.device': device}) args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile} vols = __salt__['boto_ec2.get_all_volumes'](filters=filters, **args) if not vols: ret['comment'] = 'Volume matching criteria not found, assuming already absent' return ret if len(vols) > 1: msg = "More than one volume matched criteria, can't continue in state {0}".format(name) log.error(msg) ret['comment'] = msg ret['result'] = False return ret vol = vols[0] log.info('Matched Volume ID %s', vol) if __opts__['test']: ret['comment'] = 'The volume {0} is set to be deleted.'.format(vol) ret['result'] = None return ret if __salt__['boto_ec2.delete_volume'](volume_id=vol, force=True, **args): ret['comment'] = 'Volume {0} deleted.'.format(vol) ret['changes'] = {'old': {'volume_id': vol}, 'new': {'volume_id': None}} else: ret['comment'] = 'Error deleting volume {0}.'.format(vol) ret['result'] = False return ret def volumes_tagged(name, tag_maps, authoritative=False, region=None, key=None, keyid=None, profile=None): ''' Ensure EC2 volume(s) matching the given filters have the defined tags. .. versionadded:: 2016.11.0 name State definition name. tag_maps List of dicts of filters and tags, where 'filters' is a dict suitable for passing to the 'filters' argument of boto_ec2.get_all_volumes(), and 'tags' is a dict of tags to be set on volumes as matched by the given filters. The filter syntax is extended to permit passing either a list of volume_ids or an instance_name (with instance_name being the Name tag of the instance to which the desired volumes are mapped). Each mapping in the list is applied separately, so multiple sets of volumes can be all tagged differently with one call to this function. YAML example fragment: .. 
code-block:: yaml - filters: attachment.instance_id: i-abcdef12 tags: Name: dev-int-abcdef12.aws-foo.com - filters: attachment.device: /dev/sdf tags: ManagedSnapshots: true BillingGroup: bubba.hotep@aws-foo.com - filters: instance_name: prd-foo-01.aws-foo.com tags: Name: prd-foo-01.aws-foo.com BillingGroup: infra-team@aws-foo.com - filters: volume_ids: [ vol-12345689, vol-abcdef12 ] tags: BillingGroup: infra-team@aws-foo.com authoritative Should un-declared tags currently set on matched volumes be deleted? Boolean. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } args = {'tag_maps': tag_maps, 'authoritative': authoritative, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile} if __opts__['test']: args['dry_run'] = True r = __salt__['boto_ec2.set_volumes_tags'](**args) if r['success']: if r.get('changes'): ret['comment'] = 'Tags would be updated.' ret['changes'] = r['changes'] ret['result'] = None else: ret['comment'] = 'Error validating requested volume tags.' ret['result'] = False return ret r = __salt__['boto_ec2.set_volumes_tags'](**args) if r['success']: if r.get('changes'): ret['comment'] = 'Tags applied.' ret['changes'] = r['changes'] else: ret['comment'] = 'Error updating requested volume tags.' ret['result'] = False return ret def volume_present(name, volume_name=None, volume_id=None, instance_name=None, instance_id=None, device=None, size=None, snapshot_id=None, volume_type=None, iops=None, encrypted=False, kms_key_id=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 volume is present and attached. .. name State definition name. volume_name The Name tag value for the volume. If no volume with that matching name tag is found, a new volume will be created. 
If multiple volumes are matched, the state will fail. volume_id Resource ID of the volume. Exclusive with 'volume_name'. instance_name Attach volume to instance with this Name tag. Exclusive with 'instance_id'. instance_id Attach volume to instance with this ID. Exclusive with 'instance_name'. device The device on the instance through which the volume is exposed (e.g. /dev/sdh) size The size of the new volume, in GiB. If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. snapshot_id The snapshot ID from which the new Volume will be created. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. volume_type The type of the volume. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. Valid volume types for AWS can be found here: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html iops The provisioned IOPS you want to associate with this volume. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. encrypted Specifies whether the volume should be encrypted. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. kms_key_id If encrypted is True, this KMS Key ID may be specified to encrypt volume with this key. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. e.g.: arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} old_dict = {} new_dict = {} running_states = ('running', 'stopped') if not salt.utils.data.exactly_one((volume_name, volume_id)): raise SaltInvocationError("Exactly one of 'volume_name', 'volume_id', " " must be provided.") if not salt.utils.data.exactly_one((instance_name, instance_id)): raise SaltInvocationError("Exactly one of 'instance_name', or 'instance_id'" " must be provided.") if device is None: raise SaltInvocationError("Parameter 'device' is required.") args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile} if instance_name: instance_id = __salt__['boto_ec2.get_id']( name=instance_name, in_states=running_states, **args) if not instance_id: raise SaltInvocationError('Instance with Name {0} not found.'.format(instance_name)) instances = __salt__['boto_ec2.find_instances'](instance_id=instance_id, return_objs=True, **args) instance = instances[0] if volume_name: filters = {} filters.update({'tag:Name': volume_name}) vols = __salt__['boto_ec2.get_all_volumes'](filters=filters, **args) if len(vols) > 1: msg = "More than one volume matched volume name {0}, can't continue in state {1}".format(volume_name, name) raise SaltInvocationError(msg) if not vols: if __opts__['test']: ret['comment'] = ('The volume with name {0} is set to be created and attached' ' on {1}({2}).'.format(volume_name, instance_id, device)) ret['result'] = None return ret _rt = __salt__['boto_ec2.create_volume'](zone_name=instance.placement, size=size, snapshot_id=snapshot_id, volume_type=volume_type, iops=iops, encrypted=encrypted, kms_key_id=kms_key_id, wait_for_creation=True, **args) if 'result' in _rt: volume_id = _rt['result'] else: raise SaltInvocationError('Error creating volume with name {0}.'.format(volume_name)) _rt = __salt__['boto_ec2.set_volumes_tags'](tag_maps=[{ 'filters': {'volume_ids': [volume_id]}, 'tags': {'Name': volume_name} }], **args) if _rt['success'] is False: raise 
SaltInvocationError('Error updating requested volume ' '{0} with name {1}. {2}'.format(volume_id, volume_name, _rt['comment'])) old_dict['volume_id'] = None new_dict['volume_id'] = volume_id else: volume_id = vols[0] vols = __salt__['boto_ec2.get_all_volumes'](volume_ids=[volume_id], return_objs=True, **args) if not vols: raise SaltInvocationError('Volume {0} do not exist'.format(volume_id)) vol = vols[0] if vol.zone != instance.placement: raise SaltInvocationError(('Volume {0} in {1} cannot attach to instance' ' {2} in {3}.').format(volume_id, vol.zone, instance_id, instance.placement)) attach_data = vol.attach_data if attach_data is not None and attach_data.instance_id is not None: if instance_id == attach_data.instance_id and device == attach_data.device: ret['comment'] = 'The volume {0} is attached on {1}({2}).'.format(volume_id, instance_id, device) return ret else: if __opts__['test']: ret['comment'] = ('The volume {0} is set to be detached' ' from {1}({2} and attached on {3}({4}).').format(attach_data.instance_id, attach_data.devic, volume_id, instance_id, device) ret['result'] = None return ret if __salt__['boto_ec2.detach_volume'](volume_id=volume_id, wait_for_detachement=True, **args): ret['comment'] = 'Volume {0} is detached from {1}({2}).'.format(volume_id, attach_data.instance_id, attach_data.device) old_dict['instance_id'] = attach_data.instance_id old_dict['device'] = attach_data.device else: raise SaltInvocationError(('The volume {0} is already attached on instance {1}({2}).' 
' Failed to detach').format(volume_id, attach_data.instance_id, attach_data.device)) else: old_dict['instance_id'] = instance_id old_dict['device'] = None if __opts__['test']: ret['comment'] = 'The volume {0} is set to be attached on {1}({2}).'.format(volume_id, instance_id, device) ret['result'] = None return ret if __salt__['boto_ec2.attach_volume'](volume_id=volume_id, instance_id=instance_id, device=device, **args): ret['comment'] = ' '.join([ ret['comment'], 'Volume {0} is attached on {1}({2}).'.format(volume_id, instance_id, device)]) new_dict['instance_id'] = instance_id new_dict['device'] = device ret['changes'] = {'old': old_dict, 'new': new_dict} else: ret['comment'] = 'Error attaching volume {0} to instance {1}({2}).'.format(volume_id, instance_id, device) ret['result'] = False return ret def private_ips_present(name, network_interface_name=None, network_interface_id=None, private_ip_addresses=None, allow_reassignment=False, region=None, key=None, keyid=None, profile=None): ''' Ensure an ENI has secondary private ip addresses associated with it name (String) - State definition name network_interface_id (String) - The EC2 network interface id, example eni-123456789 private_ip_addresses (List or String) - The secondary private ip address(es) that should be present on the ENI. allow_reassignment (Boolean) - If true, will reassign a secondary private ip address associated with another ENI. If false, state will fail if the secondary private ip address is associated with another ENI. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' if not salt.utils.data.exactly_one((network_interface_name, network_interface_id)): raise SaltInvocationError("Exactly one of 'network_interface_name', " "'network_interface_id' must be provided") if not private_ip_addresses: raise SaltInvocationError("You must provide the private_ip_addresses to associate with the " "ENI") ret = { 'name': name, 'result': True, 'comment': '', 'changes': {'old': [], 'new': []} } get_eni_args = { 'name': network_interface_name, 'network_interface_id': network_interface_id, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args) # Check if there are any new secondary private ips to add to the eni if eni and eni.get('result', {}).get('private_ip_addresses'): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['old'].append(eni_pip['private_ip_address']) ips_to_add = [] for private_ip in private_ip_addresses: if private_ip not in ret['changes']['old']: ips_to_add.append(private_ip) if ips_to_add: if not __opts__['test']: # Assign secondary private ips to ENI assign_ips_args = { 'network_interface_id': network_interface_id, 'private_ip_addresses': ips_to_add, 'allow_reassignment': allow_reassignment, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } __salt__['boto_ec2.assign_private_ip_addresses'](**assign_ips_args) # Verify secondary private ips were properly assigned to ENI eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args) if eni and eni.get('result', {}).get('private_ip_addresses', None): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['new'].append(eni_pip['private_ip_address']) ips_not_added = [] for private_ip in private_ip_addresses: if private_ip not in ret['changes']['new']: ips_not_added.append(private_ip) # Display results if ips_not_added: ret['result'] = False ret['comment'] = ('ips on eni: {0}\n' 'attempted to add: {1}\n' 'could not add the following ips: {2}\n'.format( '\n\t- 
' + '\n\t- '.join(ret['changes']['new']), '\n\t- ' + '\n\t- '.join(ips_to_add), '\n\t- ' + '\n\t- '.join(ips_not_added))) else: ret['comment'] = "added ips: {0}".format( '\n\t- ' + '\n\t- '.join(ips_to_add)) # Verify there were changes if ret['changes']['old'] == ret['changes']['new']: ret['changes'] = {} else: # Testing mode, show that there were ips to add ret['comment'] = ('ips on eni: {0}\n' 'ips that would be added: {1}\n'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old']), '\n\t- ' + '\n\t- '.join(ips_to_add))) ret['changes'] = {} ret['result'] = None else: ret['comment'] = 'ips on eni: {0}'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old'])) # there were no changes since we did not attempt to remove ips ret['changes'] = {} return ret def private_ips_absent(name, network_interface_name=None, network_interface_id=None, private_ip_addresses=None, region=None, key=None, keyid=None, profile=None): ''' Ensure an ENI does not have secondary private ip addresses associated with it name (String) - State definition name network_interface_id (String) - The EC2 network interface id, example eni-123456789 private_ip_addresses (List or String) - The secondary private ip address(es) that should be absent on the ENI. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' if not salt.utils.data.exactly_one((network_interface_name, network_interface_id)): raise SaltInvocationError("Exactly one of 'network_interface_name', " "'network_interface_id' must be provided") if not private_ip_addresses: raise SaltInvocationError("You must provide the private_ip_addresses to unassociate with " "the ENI") if not isinstance(private_ip_addresses, list): private_ip_addresses = [private_ip_addresses] ret = { 'name': name, 'result': True, 'comment': '', 'changes': {'new': [], 'old': []} } get_eni_args = { 'name': network_interface_name, 'network_interface_id': network_interface_id, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args) # Check if there are any old private ips to remove from the eni primary_private_ip = None if eni and eni.get('result', {}).get('private_ip_addresses'): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['old'].append(eni_pip['private_ip_address']) if eni_pip['primary']: primary_private_ip = eni_pip['private_ip_address'] ips_to_remove = [] for private_ip in private_ip_addresses: if private_ip in ret['changes']['old']: ips_to_remove.append(private_ip) if private_ip == primary_private_ip: ret['result'] = False ret['comment'] = ('You cannot unassign the primary private ip address ({0}) on an ' 'eni\n' 'ips on eni: {1}\n' 'attempted to remove: {2}\n'.format( primary_private_ip, '\n\t- ' + '\n\t- '.join(ret['changes']['old']), '\n\t- ' + '\n\t- '.join(private_ip_addresses))) ret['changes'] = {} return ret if ips_to_remove: if not __opts__['test']: # Unassign secondary private ips to ENI assign_ips_args = { 'network_interface_id': network_interface_id, 'private_ip_addresses': ips_to_remove, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } __salt__['boto_ec2.unassign_private_ip_addresses'](**assign_ips_args) # Verify secondary private ips were properly unassigned from ENI eni = 
__salt__['boto_ec2.get_network_interface'](**get_eni_args) if eni and eni.get('result', {}).get('private_ip_addresses', None): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['new'].append(eni_pip['private_ip_address']) ips_not_removed = [] for private_ip in private_ip_addresses: if private_ip in ret['changes']['new']: ips_not_removed.append(private_ip) if ips_not_removed: ret['result'] = False ret['comment'] = ('ips on eni: {0}\n' 'attempted to remove: {1}\n' 'could not remove the following ips: {2}\n'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['new']), '\n\t- ' + '\n\t- '.join(ips_to_remove), '\n\t- ' + '\n\t- '.join(ips_not_removed))) else: ret['comment'] = "removed ips: {0}".format('\n\t- ' + '\n\t- '.join(ips_to_remove)) # Verify there were changes if ret['changes']['old'] == ret['changes']['new']: ret['changes'] = {} else: # Testing mode, show that there were ips to remove ret['comment'] = ('ips on eni: {0}\n' 'ips that would be removed: {1}\n'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old']), '\n\t- ' + '\n\t- '.join(ips_to_remove))) ret['changes'] = {} ret['result'] = None else: ret['comment'] = 'ips on network interface: {0}'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old'])) # there were no changes since we did not attempt to remove ips ret['changes'] = {} return ret
saltstack/salt
salt/states/boto_ec2.py
volume_absent
python
def volume_absent(name, volume_name=None, volume_id=None, instance_name=None, instance_id=None, device=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 volume is detached and absent. .. versionadded:: 2016.11.0 name State definition name. volume_name Name tag associated with the volume. For safety, if this matches more than one volume, the state will refuse to apply. volume_id Resource ID of the volume. instance_name Only remove volume if it is attached to instance with this Name tag. Exclusive with 'instance_id'. Requires 'device'. instance_id Only remove volume if it is attached to this instance. Exclusive with 'instance_name'. Requires 'device'. device Match by device rather than ID. Requires one of 'instance_name' or 'instance_id'. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } filters = {} running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') if not salt.utils.data.exactly_one((volume_name, volume_id, instance_name, instance_id)): raise SaltInvocationError("Exactly one of 'volume_name', 'volume_id', " "'instance_name', or 'instance_id' must be provided.") if (instance_name or instance_id) and not device: raise SaltInvocationError("Parameter 'device' is required when either " "'instance_name' or 'instance_id' is specified.") if volume_id: filters.update({'volume-id': volume_id}) if volume_name: filters.update({'tag:Name': volume_name}) if instance_name: instance_id = __salt__['boto_ec2.get_id']( name=instance_name, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) if not instance_id: ret['comment'] = ('Instance with Name {0} not found. 
Assuming ' 'associated volumes gone.'.format(instance_name)) return ret if instance_id: filters.update({'attachment.instance-id': instance_id}) if device: filters.update({'attachment.device': device}) args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile} vols = __salt__['boto_ec2.get_all_volumes'](filters=filters, **args) if not vols: ret['comment'] = 'Volume matching criteria not found, assuming already absent' return ret if len(vols) > 1: msg = "More than one volume matched criteria, can't continue in state {0}".format(name) log.error(msg) ret['comment'] = msg ret['result'] = False return ret vol = vols[0] log.info('Matched Volume ID %s', vol) if __opts__['test']: ret['comment'] = 'The volume {0} is set to be deleted.'.format(vol) ret['result'] = None return ret if __salt__['boto_ec2.delete_volume'](volume_id=vol, force=True, **args): ret['comment'] = 'Volume {0} deleted.'.format(vol) ret['changes'] = {'old': {'volume_id': vol}, 'new': {'volume_id': None}} else: ret['comment'] = 'Error deleting volume {0}.'.format(vol) ret['result'] = False return ret
Ensure the EC2 volume is detached and absent. .. versionadded:: 2016.11.0 name State definition name. volume_name Name tag associated with the volume. For safety, if this matches more than one volume, the state will refuse to apply. volume_id Resource ID of the volume. instance_name Only remove volume if it is attached to instance with this Name tag. Exclusive with 'instance_id'. Requires 'device'. instance_id Only remove volume if it is attached to this instance. Exclusive with 'instance_name'. Requires 'device'. device Match by device rather than ID. Requires one of 'instance_name' or 'instance_id'. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_ec2.py#L1118-L1218
null
# -*- coding: utf-8 -*- ''' Manage EC2 .. versionadded:: 2015.8.0 This module provides an interface to the Elastic Compute Cloud (EC2) service from AWS. The below code creates a key pair: .. code-block:: yaml create-key-pair: boto_ec2.key_present: - name: mykeypair - save_private: /root/ - region: eu-west-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs .. code-block:: yaml import-key-pair: boto_ec2.key_present: - name: mykeypair - upload_public: 'ssh-rsa AAAA' - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs You can also use salt:// in order to define the public key. .. code-block:: yaml import-key-pair: boto_ec2.key_present: - name: mykeypair - upload_public: salt://mybase/public_key.pub - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs The below code deletes a key pair: .. code-block:: yaml delete-key-pair: boto_ec2.key_absent: - name: mykeypair - region: eu-west-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging from time import time, sleep # Import salt libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin import salt.utils.data import salt.utils.dictupdate as dictupdate from salt.exceptions import SaltInvocationError, CommandExecutionError log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. ''' if 'boto_ec2.get_key' in __salt__: return 'boto_ec2' else: return False def key_present(name, save_private=None, upload_public=None, region=None, key=None, keyid=None, profile=None): ''' Ensure key pair is present. 
''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } exists = __salt__['boto_ec2.get_key'](name, region, key, keyid, profile) log.debug('exists is %s', exists) if upload_public is not None and 'salt://' in upload_public: try: upload_public = __salt__['cp.get_file_str'](upload_public) except IOError as e: log.debug(e) ret['comment'] = 'File {0} not found.'.format(upload_public) ret['result'] = False return ret if not exists: if __opts__['test']: ret['comment'] = 'The key {0} is set to be created.'.format(name) ret['result'] = None return ret if save_private and not upload_public: created = __salt__['boto_ec2.create_key']( name, save_private, region, key, keyid, profile ) if created: ret['result'] = True ret['comment'] = 'The key {0} is created.'.format(name) ret['changes']['new'] = created else: ret['result'] = False ret['comment'] = 'Could not create key {0} '.format(name) elif not save_private and upload_public: imported = __salt__['boto_ec2.import_key'](name, upload_public, region, key, keyid, profile) if imported: ret['result'] = True ret['comment'] = 'The key {0} is created.'.format(name) ret['changes']['old'] = None ret['changes']['new'] = imported else: ret['result'] = False ret['comment'] = 'Could not create key {0} '.format(name) else: ret['result'] = False ret['comment'] = 'You can either upload or download a private key ' else: ret['result'] = True ret['comment'] = 'The key name {0} already exists'.format(name) return ret def key_absent(name, region=None, key=None, keyid=None, profile=None): ''' Deletes a key pair ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } exists = __salt__['boto_ec2.get_key'](name, region, key, keyid, profile) if exists: if __opts__['test']: ret['comment'] = 'The key {0} is set to be deleted.'.format(name) ret['result'] = None return ret deleted = __salt__['boto_ec2.delete_key'](name, region, key, keyid, profile) log.debug('exists is %s', deleted) if deleted: ret['result'] = True 
ret['comment'] = 'The key {0} is deleted.'.format(name) ret['changes']['old'] = name else: ret['result'] = False ret['comment'] = 'Could not delete key {0} '.format(name) else: ret['result'] = True ret['comment'] = 'The key name {0} does not exist'.format(name) return ret def eni_present( name, subnet_id=None, subnet_name=None, private_ip_address=None, description=None, groups=None, source_dest_check=True, allocate_eip=None, arecords=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 ENI exists. .. versionadded:: 2016.3.0 name Name tag associated with the ENI. subnet_id The VPC subnet ID the ENI will exist within. subnet_name The VPC subnet name the ENI will exist within. private_ip_address The private ip address to use for this ENI. If this is not specified AWS will automatically assign a private IP address to the ENI. Must be specified at creation time; will be ignored afterward. description Description of the key. groups A list of security groups to apply to the ENI. source_dest_check Boolean specifying whether source/destination checking is enabled on the ENI. allocate_eip allocate and associate an EIP to the ENI. Could be 'standard' to allocate Elastic IP to EC2 region or 'vpc' to get it for a particular VPC .. versionchanged:: 2016.11.0 arecords A list of arecord dicts with attributes needed for the DNS add_record state. By default the boto_route53.add_record state will be used, which requires: name, zone, ttl, and identifier. See the boto_route53 state for information about these attributes. Other DNS modules can be called by specifying the provider keyword. By default, the private ENI IP address will be used, set 'public: True' in the arecord dict to use the ENI's public IP address .. versionadded:: 2016.3.0 region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' if not salt.utils.data.exactly_one((subnet_id, subnet_name)): raise SaltInvocationError('One (but not both) of subnet_id or ' 'subnet_name must be provided.') if not groups: raise SaltInvocationError('groups is a required argument.') if not isinstance(groups, list): raise SaltInvocationError('groups must be a list.') if not isinstance(source_dest_check, bool): raise SaltInvocationError('source_dest_check must be a bool.') ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} r = __salt__['boto_ec2.get_network_interface']( name=name, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in r: ret['result'] = False ret['comment'] = 'Error when attempting to find eni: {0}.'.format( r['error']['message'] ) return ret if not r['result']: if __opts__['test']: ret['comment'] = 'ENI is set to be created.' if allocate_eip: ret['comment'] = ' '.join([ret['comment'], 'An EIP is set to be allocated/assocaited to the ENI.']) if arecords: ret['comment'] = ' '.join([ret['comment'], 'A records are set to be created.']) ret['result'] = None return ret result_create = __salt__['boto_ec2.create_network_interface']( name, subnet_id=subnet_id, subnet_name=subnet_name, private_ip_address=private_ip_address, description=description, groups=groups, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_create: ret['result'] = False ret['comment'] = 'Failed to create ENI: {0}'.format( result_create['error']['message'] ) return ret r['result'] = result_create['result'] ret['comment'] = 'Created ENI {0}'.format(name) ret['changes']['id'] = r['result']['id'] else: _ret = _eni_attribute( r['result'], 'description', description, region, key, keyid, profile ) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = _ret['comment'] if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _eni_groups( r['result'], groups, region, key, keyid, profile ) ret['changes'] = 
dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret # Actions that need to occur whether creating or updating _ret = _eni_attribute( r['result'], 'source_dest_check', source_dest_check, region, key, keyid, profile ) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] return ret if allocate_eip: if 'allocationId' not in r['result']: if __opts__['test']: ret['comment'] = ' '.join([ret['comment'], 'An EIP is set to be allocated and assocaited to the ENI.']) else: domain = 'vpc' if allocate_eip == 'vpc' else None eip_alloc = __salt__['boto_ec2.allocate_eip_address'](domain=domain, region=region, key=key, keyid=keyid, profile=profile) if eip_alloc: _ret = __salt__['boto_ec2.associate_eip_address'](instance_id=None, instance_name=None, public_ip=None, allocation_id=eip_alloc['allocation_id'], network_interface_id=r['result']['id'], private_ip_address=None, allow_reassociation=False, region=region, key=key, keyid=keyid, profile=profile) if not _ret: _ret = __salt__['boto_ec2.release_eip_address'](public_ip=None, allocation_id=eip_alloc['allocation_id'], region=region, key=key, keyid=keyid, profile=profile) ret['result'] = False msg = 'Failed to assocaite the allocated EIP address with the ENI. The EIP {0}'.format('was successfully released.' if _ret else 'was NOT RELEASED.') ret['comment'] = ' '.join([ret['comment'], msg]) return ret else: ret['result'] = False ret['comment'] = ' '.join([ret['comment'], 'Failed to allocate an EIP address']) return ret else: ret['comment'] = ' '.join([ret['comment'], 'An EIP is already allocated/assocaited to the ENI']) if arecords: for arecord in arecords: if 'name' not in arecord: msg = 'The arecord must contain a "name" property.' 
raise SaltInvocationError(msg) log.debug('processing arecord %s', arecord) _ret = None dns_provider = 'boto_route53' arecord['record_type'] = 'A' public_ip_arecord = False if 'public' in arecord: public_ip_arecord = arecord.pop('public') if public_ip_arecord: if 'publicIp' in r['result']: arecord['value'] = r['result']['publicIp'] elif 'public_ip' in eip_alloc: arecord['value'] = eip_alloc['public_ip'] else: msg = 'Unable to add an A record for the public IP address, a public IP address does not seem to be allocated to this ENI.' raise CommandExecutionError(msg) else: arecord['value'] = r['result']['private_ip_address'] if 'provider' in arecord: dns_provider = arecord.pop('provider') if dns_provider == 'boto_route53': if 'profile' not in arecord: arecord['profile'] = profile if 'key' not in arecord: arecord['key'] = key if 'keyid' not in arecord: arecord['keyid'] = keyid if 'region' not in arecord: arecord['region'] = region _ret = __states__['.'.join([dns_provider, 'present'])](**arecord) log.debug('ret from dns_provider.present = %s', _ret) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret return ret def _eni_attribute(metadata, attr, value, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} if metadata[attr] == value: return ret if __opts__['test']: ret['comment'] = 'ENI set to have {0} updated.'.format(attr) ret['result'] = None return ret result_update = __salt__['boto_ec2.modify_network_interface_attribute']( network_interface_id=metadata['id'], attr=attr, value=value, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_update: msg = 'Failed to update ENI {0}: {1}.' 
ret['result'] = False ret['comment'] = msg.format(attr, result_update['error']['message']) else: ret['comment'] = 'Updated ENI {0}.'.format(attr) ret['changes'][attr] = { 'old': metadata[attr], 'new': value } return ret def _eni_groups(metadata, groups, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} group_ids = [g['id'] for g in metadata['groups']] group_ids.sort() _groups = __salt__['boto_secgroup.convert_to_group_ids']( groups, vpc_id=metadata['vpc_id'], region=region, key=key, keyid=keyid, profile=profile ) if not _groups: ret['comment'] = 'Could not find secgroup ids for provided groups.' ret['result'] = False _groups.sort() if group_ids == _groups: return ret if __opts__['test']: ret['comment'] = 'ENI set to have groups updated.' ret['result'] = None return ret result_update = __salt__['boto_ec2.modify_network_interface_attribute']( network_interface_id=metadata['id'], attr='groups', value=_groups, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_update: msg = 'Failed to update ENI groups: {1}.' ret['result'] = False ret['comment'] = msg.format(result_update['error']['message']) else: ret['comment'] = 'Updated ENI groups.' ret['changes']['groups'] = { 'old': group_ids, 'new': _groups } return ret def eni_absent( name, release_eip=False, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 ENI is absent. .. versionadded:: 2016.3.0 name Name tag associated with the ENI. release_eip True/False - release any EIP associated with the ENI region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    # Look up the ENI by its Name tag; r['result'] is falsy when no
    # matching interface exists.
    r = __salt__['boto_ec2.get_network_interface'](
        name=name, region=region, key=key, keyid=keyid, profile=profile
    )
    if 'error' in r:
        ret['result'] = False
        ret['comment'] = 'Error when attempting to find eni: {0}.'.format(
            r['error']['message']
        )
        return ret
    if not r['result']:
        # ENI does not exist.
        # NOTE(review): in test mode this still reports 'set to be
        # deleted' even though nothing would be deleted — arguably the
        # message should say the ENI is already absent; confirm intent.
        if __opts__['test']:
            ret['comment'] = 'ENI is set to be deleted.'
            ret['result'] = None
            return ret
    else:
        if __opts__['test']:
            ret['comment'] = 'ENI is set to be deleted.'
            if release_eip and 'allocationId' in r['result']:
                ret['comment'] = ' '.join([ret['comment'], 'Allocated/associated EIP is set to be released'])
            ret['result'] = None
            return ret
        # Detach first (with force=True) if the ENI is attached to an
        # instance; deletion fails on an attached interface.
        if 'id' in r['result']['attachment']:
            result_detach = __salt__['boto_ec2.detach_network_interface'](
                name=name, force=True, region=region, key=key,
                keyid=keyid, profile=profile
            )
            if 'error' in result_detach:
                ret['result'] = False
                ret['comment'] = 'Failed to detach ENI: {0}'.format(
                    result_detach['error']['message']
                )
                return ret
            # TODO: Ensure the detach occurs before continuing
        result_delete = __salt__['boto_ec2.delete_network_interface'](
            name=name, region=region, key=key, keyid=keyid, profile=profile
        )
        if 'error' in result_delete:
            ret['result'] = False
            ret['comment'] = 'Failed to delete ENI: {0}'.format(
                result_delete['error']['message']
            )
            return ret
        ret['comment'] = 'Deleted ENI {0}'.format(name)
        ret['changes']['id'] = None
        # Optionally release the EIP that was associated with the ENI,
        # so the address is not left allocated (and billed).
        if release_eip and 'allocationId' in r['result']:
            _ret = __salt__['boto_ec2.release_eip_address'](
                public_ip=None,
                allocation_id=r['result']['allocationId'],
                region=region, key=key, keyid=keyid, profile=profile)
            if not _ret:
                ret['comment'] = ' '.join([ret['comment'], 'Failed to release EIP allocated to the ENI.'])
                ret['result'] = False
                return ret
            else:
                ret['comment'] = ' '.join([ret['comment'], 'EIP released.'])
                ret['changes']['eip released'] = True
    return ret


def snapshot_created(name, ami_name, instance_name, wait_until_available=True, wait_timeout_seconds=300,
**kwargs): ''' Create a snapshot from the given instance .. versionadded:: 2016.3.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } if not __salt__['boto_ec2.create_image'](ami_name=ami_name, instance_name=instance_name, **kwargs): ret['comment'] = 'Failed to create new AMI {ami_name}'.format(ami_name=ami_name) ret['result'] = False return ret ret['comment'] = 'Created new AMI {ami_name}'.format(ami_name=ami_name) ret['changes']['new'] = {ami_name: ami_name} if not wait_until_available: return ret starttime = time() while True: images = __salt__['boto_ec2.find_images'](ami_name=ami_name, return_objs=True, **kwargs) if images and images[0].state == 'available': break if time() - starttime > wait_timeout_seconds: if images: ret['comment'] = 'AMI still in state {state} after timeout'.format(state=images[0].state) else: ret['comment'] = 'AMI with name {ami_name} not found after timeout.'.format(ami_name=ami_name) ret['result'] = False return ret sleep(5) return ret def instance_present(name, instance_name=None, instance_id=None, image_id=None, image_name=None, tags=None, key_name=None, security_groups=None, user_data=None, instance_type=None, placement=None, kernel_id=None, ramdisk_id=None, vpc_id=None, vpc_name=None, monitoring_enabled=None, subnet_id=None, subnet_name=None, private_ip_address=None, block_device_map=None, disable_api_termination=None, instance_initiated_shutdown_behavior=None, placement_group=None, client_token=None, security_group_ids=None, security_group_names=None, additional_info=None, tenancy=None, instance_profile_arn=None, instance_profile_name=None, ebs_optimized=None, network_interfaces=None, network_interface_name=None, network_interface_id=None, attributes=None, target_state=None, public_ip=None, allocation_id=None, allocate_eip=False, region=None, key=None, keyid=None, profile=None): ### TODO - implement 'target_state={running, stopped}' ''' Ensure an EC2 instance is running with the given attributes and state. 
name (string) - The name of the state definition. Recommended that this match the instance_name attribute (generally the FQDN of the instance). instance_name (string) - The name of the instance, generally its FQDN. Exclusive with 'instance_id'. instance_id (string) - The ID of the instance (if known). Exclusive with 'instance_name'. image_id (string) – The ID of the AMI image to run. image_name (string) – The name of the AMI image to run. tags (dict) - Tags to apply to the instance. key_name (string) – The name of the key pair with which to launch instances. security_groups (list of strings) – The names of the EC2 classic security groups with which to associate instances user_data (string) – The Base64-encoded MIME user data to be made available to the instance(s) in this reservation. instance_type (string) – The EC2 instance size/type. Note that only certain types are compatible with HVM based AMIs. placement (string) – The Availability Zone to launch the instance into. kernel_id (string) – The ID of the kernel with which to launch the instances. ramdisk_id (string) – The ID of the RAM disk with which to launch the instances. vpc_id (string) - The ID of a VPC to attach the instance to. vpc_name (string) - The name of a VPC to attach the instance to. monitoring_enabled (bool) – Enable detailed CloudWatch monitoring on the instance. subnet_id (string) – The ID of the subnet within which to launch the instances for VPC. subnet_name (string) – The name of the subnet within which to launch the instances for VPC. private_ip_address (string) – If you’re using VPC, you can optionally use this parameter to assign the instance a specific available IP address from the subnet (e.g., 10.0.0.25). block_device_map (boto.ec2.blockdevicemapping.BlockDeviceMapping) – A BlockDeviceMapping data structure describing the EBS volumes associated with the Image. disable_api_termination (bool) – If True, the instances will be locked and will not be able to be terminated via the API. 
instance_initiated_shutdown_behavior (string) – Specifies whether the instance stops or terminates on instance-initiated shutdown. Valid values are: - 'stop' - 'terminate' placement_group (string) – If specified, this is the name of the placement group in which the instance(s) will be launched. client_token (string) – Unique, case-sensitive identifier you provide to ensure idempotency of the request. Maximum 64 ASCII characters. security_group_ids (list of strings) – The IDs of the VPC security groups with which to associate instances. security_group_names (list of strings) – The names of the VPC security groups with which to associate instances. additional_info (string) – Specifies additional information to make available to the instance(s). tenancy (string) – The tenancy of the instance you want to launch. An instance with a tenancy of ‘dedicated’ runs on single-tenant hardware and can only be launched into a VPC. Valid values are:”default” or “dedicated”. NOTE: To use dedicated tenancy you MUST specify a VPC subnet-ID as well. instance_profile_arn (string) – The Amazon resource name (ARN) of the IAM Instance Profile (IIP) to associate with the instances. instance_profile_name (string) – The name of the IAM Instance Profile (IIP) to associate with the instances. ebs_optimized (bool) – Whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and a tuned configuration stack to provide optimal EBS I/O performance. This optimization isn’t available with all instance types. network_interfaces (boto.ec2.networkinterface.NetworkInterfaceCollection) – A NetworkInterfaceCollection data structure containing the ENI specifications for the instance. network_interface_name (string) - The name of Elastic Network Interface to attach .. versionadded:: 2016.11.0 network_interface_id (string) - The id of Elastic Network Interface to attach .. 
versionadded:: 2016.11.0 attributes (dict) - Instance attributes and value to be applied to the instance. Available options are: - instanceType - A valid instance type (m1.small) - kernel - Kernel ID (None) - ramdisk - Ramdisk ID (None) - userData - Base64 encoded String (None) - disableApiTermination - Boolean (true) - instanceInitiatedShutdownBehavior - stop|terminate - blockDeviceMapping - List of strings - ie: [‘/dev/sda=false’] - sourceDestCheck - Boolean (true) - groupSet - Set of Security Groups or IDs - ebsOptimized - Boolean (false) - sriovNetSupport - String - ie: ‘simple’ target_state (string) - The desired target state of the instance. Available options are: - running - stopped Note that this option is currently UNIMPLEMENTED. public_ip: (string) - The IP of a previously allocated EIP address, which will be attached to the instance. EC2 Classic instances ONLY - for VCP pass in an allocation_id instead. allocation_id: (string) - The ID of a previously allocated EIP address, which will be attached to the instance. VPC instances ONLY - for Classic pass in a public_ip instead. allocate_eip: (bool) - Allocate and attach an EIP on-the-fly for this instance. Note you'll want to releaase this address when terminating the instance, either manually or via the 'release_eip' flag to 'instance_absent'. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. .. 
versionadded:: 2016.3.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } _create = False running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') changed_attrs = {} if not salt.utils.data.exactly_one((image_id, image_name)): raise SaltInvocationError('Exactly one of image_id OR ' 'image_name must be provided.') if (public_ip or allocation_id or allocate_eip) and not salt.utils.data.exactly_one((public_ip, allocation_id, allocate_eip)): raise SaltInvocationError('At most one of public_ip, allocation_id OR ' 'allocate_eip may be provided.') if instance_id: exists = __salt__['boto_ec2.exists'](instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) if not exists: _create = True else: instances = __salt__['boto_ec2.find_instances'](name=instance_name if instance_name else name, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) if not instances: _create = True elif len(instances) > 1: log.debug('Multiple instances matching criteria found - cannot determine a singular instance-id') instance_id = None # No way to know, we'll just have to bail later.... 
else: instance_id = instances[0] if _create: if __opts__['test']: ret['comment'] = 'The instance {0} is set to be created.'.format(name) ret['result'] = None return ret if image_name: args = {'ami_name': image_name, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile} image_ids = __salt__['boto_ec2.find_images'](**args) if image_ids: image_id = image_ids[0] else: image_id = image_name r = __salt__['boto_ec2.run'](image_id, instance_name if instance_name else name, tags=tags, key_name=key_name, security_groups=security_groups, user_data=user_data, instance_type=instance_type, placement=placement, kernel_id=kernel_id, ramdisk_id=ramdisk_id, vpc_id=vpc_id, vpc_name=vpc_name, monitoring_enabled=monitoring_enabled, subnet_id=subnet_id, subnet_name=subnet_name, private_ip_address=private_ip_address, block_device_map=block_device_map, disable_api_termination=disable_api_termination, instance_initiated_shutdown_behavior=instance_initiated_shutdown_behavior, placement_group=placement_group, client_token=client_token, security_group_ids=security_group_ids, security_group_names=security_group_names, additional_info=additional_info, tenancy=tenancy, instance_profile_arn=instance_profile_arn, instance_profile_name=instance_profile_name, ebs_optimized=ebs_optimized, network_interfaces=network_interfaces, network_interface_name=network_interface_name, network_interface_id=network_interface_id, region=region, key=key, keyid=keyid, profile=profile) if not r or 'instance_id' not in r: ret['result'] = False ret['comment'] = 'Failed to create instance {0}.'.format(instance_name if instance_name else name) return ret instance_id = r['instance_id'] ret['changes'] = {'old': {}, 'new': {}} ret['changes']['old']['instance_id'] = None ret['changes']['new']['instance_id'] = instance_id # To avoid issues we only allocate new EIPs at instance creation. 
# This might miss situations where an instance is initially created # created without and one is added later, but the alternative is the # risk of EIPs allocated at every state run. if allocate_eip: if __opts__['test']: ret['comment'] = 'New EIP would be allocated.' ret['result'] = None return ret domain = 'vpc' if vpc_id or vpc_name else None r = __salt__['boto_ec2.allocate_eip_address']( domain=domain, region=region, key=key, keyid=keyid, profile=profile) if not r: ret['result'] = False ret['comment'] = 'Failed to allocate new EIP.' return ret allocation_id = r['allocation_id'] log.info("New EIP with address %s allocated.", r['public_ip']) else: log.info("EIP not requested.") if public_ip or allocation_id: # This can take a bit to show up, give it a chance to... tries = 10 secs = 3 for t in range(tries): r = __salt__['boto_ec2.get_eip_address_info']( addresses=public_ip, allocation_ids=allocation_id, region=region, key=key, keyid=keyid, profile=profile) if r: break else: log.info( 'Waiting up to %s secs for new EIP %s to become available', tries * secs, public_ip or allocation_id ) time.sleep(secs) if not r: ret['result'] = False ret['comment'] = 'Failed to lookup EIP {0}.'.format(public_ip or allocation_id) return ret ip = r[0]['public_ip'] if r[0].get('instance_id'): if r[0]['instance_id'] != instance_id: ret['result'] = False ret['comment'] = ('EIP {0} is already associated with instance ' '{1}.'.format(public_ip if public_ip else allocation_id, r[0]['instance_id'])) return ret else: if __opts__['test']: ret['comment'] = 'Instance {0} to be updated.'.format(name) ret['result'] = None return ret r = __salt__['boto_ec2.associate_eip_address']( instance_id=instance_id, public_ip=public_ip, allocation_id=allocation_id, region=region, key=key, keyid=keyid, profile=profile) if r: if 'new' not in ret['changes']: ret['changes']['new'] = {} ret['changes']['new']['public_ip'] = ip else: ret['result'] = False ret['comment'] = 'Failed to attach EIP to instance 
{0}.'.format( instance_name if instance_name else name) return ret if attributes: for k, v in six.iteritems(attributes): curr = __salt__['boto_ec2.get_attribute'](k, instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile) curr = {} if not isinstance(curr, dict) else curr if curr.get(k) == v: continue else: if __opts__['test']: changed_attrs[k] = 'The instance attribute {0} is set to be changed from \'{1}\' to \'{2}\'.'.format( k, curr.get(k), v) continue try: r = __salt__['boto_ec2.set_attribute'](attribute=k, attribute_value=v, instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile) except SaltInvocationError as e: ret['result'] = False ret['comment'] = 'Failed to set attribute {0} to {1} on instance {2}.'.format(k, v, instance_name) return ret ret['changes'] = ret['changes'] if ret['changes'] else {'old': {}, 'new': {}} ret['changes']['old'][k] = curr.get(k) ret['changes']['new'][k] = v if __opts__['test']: if changed_attrs: ret['changes']['new'] = changed_attrs ret['result'] = None else: ret['comment'] = 'Instance {0} is in the correct state'.format(instance_name if instance_name else name) ret['result'] = True if tags and instance_id is not None: tags = dict(tags) curr_tags = dict(__salt__['boto_ec2.get_all_tags'](filters={'resource-id': instance_id}, region=region, key=key, keyid=keyid, profile=profile).get(instance_id, {})) current = set(curr_tags.keys()) desired = set(tags.keys()) remove = list(current - desired) # Boto explicitly requires a list here and can't cope with a set... add = dict([(t, tags[t]) for t in desired - current]) replace = dict([(t, tags[t]) for t in tags if tags.get(t) != curr_tags.get(t)]) # Tag keys are unique despite the bizarre semantics uses which make it LOOK like they could be duplicative. 
add.update(replace) if add or remove: if __opts__['test']: ret['changes']['old'] = ret['changes']['old'] if 'old' in ret['changes'] else {} ret['changes']['new'] = ret['changes']['new'] if 'new' in ret['changes'] else {} ret['changes']['old']['tags'] = curr_tags ret['changes']['new']['tags'] = tags ret['comment'] += ' Tags would be updated on instance {0}.'.format(instance_name if instance_name else name) else: if remove: if not __salt__['boto_ec2.delete_tags'](resource_ids=instance_id, tags=remove, region=region, key=key, keyid=keyid, profile=profile): msg = "Error while deleting tags on instance {0}".format(instance_name if instance_name else name) log.error(msg) ret['comment'] += ' ' + msg ret['result'] = False return ret if add: if not __salt__['boto_ec2.create_tags'](resource_ids=instance_id, tags=add, region=region, key=key, keyid=keyid, profile=profile): msg = "Error while creating tags on instance {0}".format(instance_name if instance_name else name) log.error(msg) ret['comment'] += ' ' + msg ret['result'] = False return ret ret['changes']['old'] = ret['changes']['old'] if 'old' in ret['changes'] else {} ret['changes']['new'] = ret['changes']['new'] if 'new' in ret['changes'] else {} ret['changes']['old']['tags'] = curr_tags ret['changes']['new']['tags'] = tags return ret def instance_absent(name, instance_name=None, instance_id=None, release_eip=False, region=None, key=None, keyid=None, profile=None, filters=None): ''' Ensure an EC2 instance does not exist (is stopped and removed). .. versionchanged:: 2016.11.0 name (string) - The name of the state definition. instance_name (string) - The name of the instance. instance_id (string) - The ID of the instance. release_eip (bool) - Release any associated EIPs during termination. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. 
profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. filters (dict) - A dict of additional filters to use in matching the instance to delete. YAML example fragment: .. code-block:: yaml - filters: vpc-id: vpc-abcdef12 ''' ### TODO - Implement 'force' option?? Would automagically turn off ### 'disableApiTermination', as needed, before trying to delete. ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') if not instance_id: try: instance_id = __salt__['boto_ec2.get_id'](name=instance_name if instance_name else name, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states, filters=filters) except CommandExecutionError as e: ret['result'] = None ret['comment'] = ("Couldn't determine current status of instance " "{0}.".format(instance_name or name)) return ret instances = __salt__['boto_ec2.find_instances'](instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile, return_objs=True, filters=filters) if not instances: ret['result'] = True ret['comment'] = 'Instance {0} is already gone.'.format(instance_id) return ret instance = instances[0] ### Honor 'disableApiTermination' - if you want to override it, first use set_attribute() to turn it off no_can_do = __salt__['boto_ec2.get_attribute']('disableApiTermination', instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile) if no_can_do.get('disableApiTermination') is True: ret['result'] = False ret['comment'] = 'Termination of instance {0} via the API is disabled.'.format(instance_id) return ret if __opts__['test']: ret['comment'] = 'The instance {0} is set to be deleted.'.format(name) ret['result'] = None return ret r = __salt__['boto_ec2.terminate'](instance_id=instance_id, name=instance_name, region=region, key=key, keyid=keyid, profile=profile) if not r: ret['result'] = False ret['comment'] 
= 'Failed to terminate instance {0}.'.format(instance_id) return ret ret['changes']['old'] = {'instance_id': instance_id} ret['changes']['new'] = None if release_eip: ip = getattr(instance, 'ip_address', None) if ip: base_args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile} public_ip = None alloc_id = None assoc_id = None if getattr(instance, 'vpc_id', None): r = __salt__['boto_ec2.get_eip_address_info'](addresses=ip, **base_args) if r and 'allocation_id' in r[0]: alloc_id = r[0]['allocation_id'] assoc_id = r[0].get('association_id') else: # I /believe/ this situation is impossible but let's hedge our bets... ret['result'] = False ret['comment'] = "Can't determine AllocationId for address {0}.".format(ip) return ret else: public_ip = instance.ip_address if assoc_id: # Race here - sometimes the terminate above will already have dropped this if not __salt__['boto_ec2.disassociate_eip_address'](association_id=assoc_id, **base_args): log.warning("Failed to disassociate EIP %s.", ip) if __salt__['boto_ec2.release_eip_address'](allocation_id=alloc_id, public_ip=public_ip, **base_args): log.info("Released EIP address %s", public_ip or r[0]['public_ip']) ret['changes']['old']['public_ip'] = public_ip or r[0]['public_ip'] else: ret['result'] = False ret['comment'] = "Failed to release EIP {0}.".format(ip) return ret return ret def volumes_tagged(name, tag_maps, authoritative=False, region=None, key=None, keyid=None, profile=None): ''' Ensure EC2 volume(s) matching the given filters have the defined tags. .. versionadded:: 2016.11.0 name State definition name. tag_maps List of dicts of filters and tags, where 'filters' is a dict suitable for passing to the 'filters' argument of boto_ec2.get_all_volumes(), and 'tags' is a dict of tags to be set on volumes as matched by the given filters. 
The filter syntax is extended to permit passing either a list of volume_ids or an instance_name (with instance_name being the Name tag of the instance to which the desired volumes are mapped). Each mapping in the list is applied separately, so multiple sets of volumes can be all tagged differently with one call to this function. YAML example fragment: .. code-block:: yaml - filters: attachment.instance_id: i-abcdef12 tags: Name: dev-int-abcdef12.aws-foo.com - filters: attachment.device: /dev/sdf tags: ManagedSnapshots: true BillingGroup: bubba.hotep@aws-foo.com - filters: instance_name: prd-foo-01.aws-foo.com tags: Name: prd-foo-01.aws-foo.com BillingGroup: infra-team@aws-foo.com - filters: volume_ids: [ vol-12345689, vol-abcdef12 ] tags: BillingGroup: infra-team@aws-foo.com authoritative Should un-declared tags currently set on matched volumes be deleted? Boolean. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } args = {'tag_maps': tag_maps, 'authoritative': authoritative, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile} if __opts__['test']: args['dry_run'] = True r = __salt__['boto_ec2.set_volumes_tags'](**args) if r['success']: if r.get('changes'): ret['comment'] = 'Tags would be updated.' ret['changes'] = r['changes'] ret['result'] = None else: ret['comment'] = 'Error validating requested volume tags.' ret['result'] = False return ret r = __salt__['boto_ec2.set_volumes_tags'](**args) if r['success']: if r.get('changes'): ret['comment'] = 'Tags applied.' ret['changes'] = r['changes'] else: ret['comment'] = 'Error updating requested volume tags.' 
ret['result'] = False return ret def volume_present(name, volume_name=None, volume_id=None, instance_name=None, instance_id=None, device=None, size=None, snapshot_id=None, volume_type=None, iops=None, encrypted=False, kms_key_id=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 volume is present and attached. .. name State definition name. volume_name The Name tag value for the volume. If no volume with that matching name tag is found, a new volume will be created. If multiple volumes are matched, the state will fail. volume_id Resource ID of the volume. Exclusive with 'volume_name'. instance_name Attach volume to instance with this Name tag. Exclusive with 'instance_id'. instance_id Attach volume to instance with this ID. Exclusive with 'instance_name'. device The device on the instance through which the volume is exposed (e.g. /dev/sdh) size The size of the new volume, in GiB. If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. snapshot_id The snapshot ID from which the new Volume will be created. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. volume_type The type of the volume. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. Valid volume types for AWS can be found here: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html iops The provisioned IOPS you want to associate with this volume. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. encrypted Specifies whether the volume should be encrypted. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. kms_key_id If encrypted is True, this KMS Key ID may be specified to encrypt volume with this key. 
Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. e.g.: arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} old_dict = {} new_dict = {} running_states = ('running', 'stopped') if not salt.utils.data.exactly_one((volume_name, volume_id)): raise SaltInvocationError("Exactly one of 'volume_name', 'volume_id', " " must be provided.") if not salt.utils.data.exactly_one((instance_name, instance_id)): raise SaltInvocationError("Exactly one of 'instance_name', or 'instance_id'" " must be provided.") if device is None: raise SaltInvocationError("Parameter 'device' is required.") args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile} if instance_name: instance_id = __salt__['boto_ec2.get_id']( name=instance_name, in_states=running_states, **args) if not instance_id: raise SaltInvocationError('Instance with Name {0} not found.'.format(instance_name)) instances = __salt__['boto_ec2.find_instances'](instance_id=instance_id, return_objs=True, **args) instance = instances[0] if volume_name: filters = {} filters.update({'tag:Name': volume_name}) vols = __salt__['boto_ec2.get_all_volumes'](filters=filters, **args) if len(vols) > 1: msg = "More than one volume matched volume name {0}, can't continue in state {1}".format(volume_name, name) raise SaltInvocationError(msg) if not vols: if __opts__['test']: ret['comment'] = ('The volume with name {0} is set to be created and attached' ' on {1}({2}).'.format(volume_name, instance_id, device)) ret['result'] = None return ret _rt = __salt__['boto_ec2.create_volume'](zone_name=instance.placement, size=size, snapshot_id=snapshot_id, volume_type=volume_type, iops=iops, 
encrypted=encrypted, kms_key_id=kms_key_id, wait_for_creation=True, **args) if 'result' in _rt: volume_id = _rt['result'] else: raise SaltInvocationError('Error creating volume with name {0}.'.format(volume_name)) _rt = __salt__['boto_ec2.set_volumes_tags'](tag_maps=[{ 'filters': {'volume_ids': [volume_id]}, 'tags': {'Name': volume_name} }], **args) if _rt['success'] is False: raise SaltInvocationError('Error updating requested volume ' '{0} with name {1}. {2}'.format(volume_id, volume_name, _rt['comment'])) old_dict['volume_id'] = None new_dict['volume_id'] = volume_id else: volume_id = vols[0] vols = __salt__['boto_ec2.get_all_volumes'](volume_ids=[volume_id], return_objs=True, **args) if not vols: raise SaltInvocationError('Volume {0} do not exist'.format(volume_id)) vol = vols[0] if vol.zone != instance.placement: raise SaltInvocationError(('Volume {0} in {1} cannot attach to instance' ' {2} in {3}.').format(volume_id, vol.zone, instance_id, instance.placement)) attach_data = vol.attach_data if attach_data is not None and attach_data.instance_id is not None: if instance_id == attach_data.instance_id and device == attach_data.device: ret['comment'] = 'The volume {0} is attached on {1}({2}).'.format(volume_id, instance_id, device) return ret else: if __opts__['test']: ret['comment'] = ('The volume {0} is set to be detached' ' from {1}({2} and attached on {3}({4}).').format(attach_data.instance_id, attach_data.devic, volume_id, instance_id, device) ret['result'] = None return ret if __salt__['boto_ec2.detach_volume'](volume_id=volume_id, wait_for_detachement=True, **args): ret['comment'] = 'Volume {0} is detached from {1}({2}).'.format(volume_id, attach_data.instance_id, attach_data.device) old_dict['instance_id'] = attach_data.instance_id old_dict['device'] = attach_data.device else: raise SaltInvocationError(('The volume {0} is already attached on instance {1}({2}).' 
' Failed to detach').format(volume_id, attach_data.instance_id, attach_data.device)) else: old_dict['instance_id'] = instance_id old_dict['device'] = None if __opts__['test']: ret['comment'] = 'The volume {0} is set to be attached on {1}({2}).'.format(volume_id, instance_id, device) ret['result'] = None return ret if __salt__['boto_ec2.attach_volume'](volume_id=volume_id, instance_id=instance_id, device=device, **args): ret['comment'] = ' '.join([ ret['comment'], 'Volume {0} is attached on {1}({2}).'.format(volume_id, instance_id, device)]) new_dict['instance_id'] = instance_id new_dict['device'] = device ret['changes'] = {'old': old_dict, 'new': new_dict} else: ret['comment'] = 'Error attaching volume {0} to instance {1}({2}).'.format(volume_id, instance_id, device) ret['result'] = False return ret def private_ips_present(name, network_interface_name=None, network_interface_id=None, private_ip_addresses=None, allow_reassignment=False, region=None, key=None, keyid=None, profile=None): ''' Ensure an ENI has secondary private ip addresses associated with it name (String) - State definition name network_interface_id (String) - The EC2 network interface id, example eni-123456789 private_ip_addresses (List or String) - The secondary private ip address(es) that should be present on the ENI. allow_reassignment (Boolean) - If true, will reassign a secondary private ip address associated with another ENI. If false, state will fail if the secondary private ip address is associated with another ENI. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' if not salt.utils.data.exactly_one((network_interface_name, network_interface_id)): raise SaltInvocationError("Exactly one of 'network_interface_name', " "'network_interface_id' must be provided") if not private_ip_addresses: raise SaltInvocationError("You must provide the private_ip_addresses to associate with the " "ENI") ret = { 'name': name, 'result': True, 'comment': '', 'changes': {'old': [], 'new': []} } get_eni_args = { 'name': network_interface_name, 'network_interface_id': network_interface_id, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args) # Check if there are any new secondary private ips to add to the eni if eni and eni.get('result', {}).get('private_ip_addresses'): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['old'].append(eni_pip['private_ip_address']) ips_to_add = [] for private_ip in private_ip_addresses: if private_ip not in ret['changes']['old']: ips_to_add.append(private_ip) if ips_to_add: if not __opts__['test']: # Assign secondary private ips to ENI assign_ips_args = { 'network_interface_id': network_interface_id, 'private_ip_addresses': ips_to_add, 'allow_reassignment': allow_reassignment, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } __salt__['boto_ec2.assign_private_ip_addresses'](**assign_ips_args) # Verify secondary private ips were properly assigned to ENI eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args) if eni and eni.get('result', {}).get('private_ip_addresses', None): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['new'].append(eni_pip['private_ip_address']) ips_not_added = [] for private_ip in private_ip_addresses: if private_ip not in ret['changes']['new']: ips_not_added.append(private_ip) # Display results if ips_not_added: ret['result'] = False ret['comment'] = ('ips on eni: {0}\n' 'attempted to add: {1}\n' 'could not add the following ips: {2}\n'.format( '\n\t- 
' + '\n\t- '.join(ret['changes']['new']), '\n\t- ' + '\n\t- '.join(ips_to_add), '\n\t- ' + '\n\t- '.join(ips_not_added))) else: ret['comment'] = "added ips: {0}".format( '\n\t- ' + '\n\t- '.join(ips_to_add)) # Verify there were changes if ret['changes']['old'] == ret['changes']['new']: ret['changes'] = {} else: # Testing mode, show that there were ips to add ret['comment'] = ('ips on eni: {0}\n' 'ips that would be added: {1}\n'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old']), '\n\t- ' + '\n\t- '.join(ips_to_add))) ret['changes'] = {} ret['result'] = None else: ret['comment'] = 'ips on eni: {0}'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old'])) # there were no changes since we did not attempt to remove ips ret['changes'] = {} return ret def private_ips_absent(name, network_interface_name=None, network_interface_id=None, private_ip_addresses=None, region=None, key=None, keyid=None, profile=None): ''' Ensure an ENI does not have secondary private ip addresses associated with it name (String) - State definition name network_interface_id (String) - The EC2 network interface id, example eni-123456789 private_ip_addresses (List or String) - The secondary private ip address(es) that should be absent on the ENI. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' if not salt.utils.data.exactly_one((network_interface_name, network_interface_id)): raise SaltInvocationError("Exactly one of 'network_interface_name', " "'network_interface_id' must be provided") if not private_ip_addresses: raise SaltInvocationError("You must provide the private_ip_addresses to unassociate with " "the ENI") if not isinstance(private_ip_addresses, list): private_ip_addresses = [private_ip_addresses] ret = { 'name': name, 'result': True, 'comment': '', 'changes': {'new': [], 'old': []} } get_eni_args = { 'name': network_interface_name, 'network_interface_id': network_interface_id, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args) # Check if there are any old private ips to remove from the eni primary_private_ip = None if eni and eni.get('result', {}).get('private_ip_addresses'): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['old'].append(eni_pip['private_ip_address']) if eni_pip['primary']: primary_private_ip = eni_pip['private_ip_address'] ips_to_remove = [] for private_ip in private_ip_addresses: if private_ip in ret['changes']['old']: ips_to_remove.append(private_ip) if private_ip == primary_private_ip: ret['result'] = False ret['comment'] = ('You cannot unassign the primary private ip address ({0}) on an ' 'eni\n' 'ips on eni: {1}\n' 'attempted to remove: {2}\n'.format( primary_private_ip, '\n\t- ' + '\n\t- '.join(ret['changes']['old']), '\n\t- ' + '\n\t- '.join(private_ip_addresses))) ret['changes'] = {} return ret if ips_to_remove: if not __opts__['test']: # Unassign secondary private ips to ENI assign_ips_args = { 'network_interface_id': network_interface_id, 'private_ip_addresses': ips_to_remove, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } __salt__['boto_ec2.unassign_private_ip_addresses'](**assign_ips_args) # Verify secondary private ips were properly unassigned from ENI eni = 
__salt__['boto_ec2.get_network_interface'](**get_eni_args) if eni and eni.get('result', {}).get('private_ip_addresses', None): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['new'].append(eni_pip['private_ip_address']) ips_not_removed = [] for private_ip in private_ip_addresses: if private_ip in ret['changes']['new']: ips_not_removed.append(private_ip) if ips_not_removed: ret['result'] = False ret['comment'] = ('ips on eni: {0}\n' 'attempted to remove: {1}\n' 'could not remove the following ips: {2}\n'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['new']), '\n\t- ' + '\n\t- '.join(ips_to_remove), '\n\t- ' + '\n\t- '.join(ips_not_removed))) else: ret['comment'] = "removed ips: {0}".format('\n\t- ' + '\n\t- '.join(ips_to_remove)) # Verify there were changes if ret['changes']['old'] == ret['changes']['new']: ret['changes'] = {} else: # Testing mode, show that there were ips to remove ret['comment'] = ('ips on eni: {0}\n' 'ips that would be removed: {1}\n'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old']), '\n\t- ' + '\n\t- '.join(ips_to_remove))) ret['changes'] = {} ret['result'] = None else: ret['comment'] = 'ips on network interface: {0}'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old'])) # there were no changes since we did not attempt to remove ips ret['changes'] = {} return ret
saltstack/salt
salt/states/boto_ec2.py
volumes_tagged
python
def volumes_tagged(name, tag_maps, authoritative=False, region=None, key=None, keyid=None, profile=None): ''' Ensure EC2 volume(s) matching the given filters have the defined tags. .. versionadded:: 2016.11.0 name State definition name. tag_maps List of dicts of filters and tags, where 'filters' is a dict suitable for passing to the 'filters' argument of boto_ec2.get_all_volumes(), and 'tags' is a dict of tags to be set on volumes as matched by the given filters. The filter syntax is extended to permit passing either a list of volume_ids or an instance_name (with instance_name being the Name tag of the instance to which the desired volumes are mapped). Each mapping in the list is applied separately, so multiple sets of volumes can be all tagged differently with one call to this function. YAML example fragment: .. code-block:: yaml - filters: attachment.instance_id: i-abcdef12 tags: Name: dev-int-abcdef12.aws-foo.com - filters: attachment.device: /dev/sdf tags: ManagedSnapshots: true BillingGroup: bubba.hotep@aws-foo.com - filters: instance_name: prd-foo-01.aws-foo.com tags: Name: prd-foo-01.aws-foo.com BillingGroup: infra-team@aws-foo.com - filters: volume_ids: [ vol-12345689, vol-abcdef12 ] tags: BillingGroup: infra-team@aws-foo.com authoritative Should un-declared tags currently set on matched volumes be deleted? Boolean. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } args = {'tag_maps': tag_maps, 'authoritative': authoritative, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile} if __opts__['test']: args['dry_run'] = True r = __salt__['boto_ec2.set_volumes_tags'](**args) if r['success']: if r.get('changes'): ret['comment'] = 'Tags would be updated.' 
ret['changes'] = r['changes'] ret['result'] = None else: ret['comment'] = 'Error validating requested volume tags.' ret['result'] = False return ret r = __salt__['boto_ec2.set_volumes_tags'](**args) if r['success']: if r.get('changes'): ret['comment'] = 'Tags applied.' ret['changes'] = r['changes'] else: ret['comment'] = 'Error updating requested volume tags.' ret['result'] = False return ret
Ensure EC2 volume(s) matching the given filters have the defined tags. .. versionadded:: 2016.11.0 name State definition name. tag_maps List of dicts of filters and tags, where 'filters' is a dict suitable for passing to the 'filters' argument of boto_ec2.get_all_volumes(), and 'tags' is a dict of tags to be set on volumes as matched by the given filters. The filter syntax is extended to permit passing either a list of volume_ids or an instance_name (with instance_name being the Name tag of the instance to which the desired volumes are mapped). Each mapping in the list is applied separately, so multiple sets of volumes can be all tagged differently with one call to this function. YAML example fragment: .. code-block:: yaml - filters: attachment.instance_id: i-abcdef12 tags: Name: dev-int-abcdef12.aws-foo.com - filters: attachment.device: /dev/sdf tags: ManagedSnapshots: true BillingGroup: bubba.hotep@aws-foo.com - filters: instance_name: prd-foo-01.aws-foo.com tags: Name: prd-foo-01.aws-foo.com BillingGroup: infra-team@aws-foo.com - filters: volume_ids: [ vol-12345689, vol-abcdef12 ] tags: BillingGroup: infra-team@aws-foo.com authoritative Should un-declared tags currently set on matched volumes be deleted? Boolean. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_ec2.py#L1221-L1309
null
# -*- coding: utf-8 -*- ''' Manage EC2 .. versionadded:: 2015.8.0 This module provides an interface to the Elastic Compute Cloud (EC2) service from AWS. The below code creates a key pair: .. code-block:: yaml create-key-pair: boto_ec2.key_present: - name: mykeypair - save_private: /root/ - region: eu-west-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs .. code-block:: yaml import-key-pair: boto_ec2.key_present: - name: mykeypair - upload_public: 'ssh-rsa AAAA' - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs You can also use salt:// in order to define the public key. .. code-block:: yaml import-key-pair: boto_ec2.key_present: - name: mykeypair - upload_public: salt://mybase/public_key.pub - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs The below code deletes a key pair: .. code-block:: yaml delete-key-pair: boto_ec2.key_absent: - name: mykeypair - region: eu-west-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging from time import time, sleep # Import salt libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin import salt.utils.data import salt.utils.dictupdate as dictupdate from salt.exceptions import SaltInvocationError, CommandExecutionError log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. ''' if 'boto_ec2.get_key' in __salt__: return 'boto_ec2' else: return False def key_present(name, save_private=None, upload_public=None, region=None, key=None, keyid=None, profile=None): ''' Ensure key pair is present. 
''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } exists = __salt__['boto_ec2.get_key'](name, region, key, keyid, profile) log.debug('exists is %s', exists) if upload_public is not None and 'salt://' in upload_public: try: upload_public = __salt__['cp.get_file_str'](upload_public) except IOError as e: log.debug(e) ret['comment'] = 'File {0} not found.'.format(upload_public) ret['result'] = False return ret if not exists: if __opts__['test']: ret['comment'] = 'The key {0} is set to be created.'.format(name) ret['result'] = None return ret if save_private and not upload_public: created = __salt__['boto_ec2.create_key']( name, save_private, region, key, keyid, profile ) if created: ret['result'] = True ret['comment'] = 'The key {0} is created.'.format(name) ret['changes']['new'] = created else: ret['result'] = False ret['comment'] = 'Could not create key {0} '.format(name) elif not save_private and upload_public: imported = __salt__['boto_ec2.import_key'](name, upload_public, region, key, keyid, profile) if imported: ret['result'] = True ret['comment'] = 'The key {0} is created.'.format(name) ret['changes']['old'] = None ret['changes']['new'] = imported else: ret['result'] = False ret['comment'] = 'Could not create key {0} '.format(name) else: ret['result'] = False ret['comment'] = 'You can either upload or download a private key ' else: ret['result'] = True ret['comment'] = 'The key name {0} already exists'.format(name) return ret def key_absent(name, region=None, key=None, keyid=None, profile=None): ''' Deletes a key pair ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } exists = __salt__['boto_ec2.get_key'](name, region, key, keyid, profile) if exists: if __opts__['test']: ret['comment'] = 'The key {0} is set to be deleted.'.format(name) ret['result'] = None return ret deleted = __salt__['boto_ec2.delete_key'](name, region, key, keyid, profile) log.debug('exists is %s', deleted) if deleted: ret['result'] = True 
ret['comment'] = 'The key {0} is deleted.'.format(name) ret['changes']['old'] = name else: ret['result'] = False ret['comment'] = 'Could not delete key {0} '.format(name) else: ret['result'] = True ret['comment'] = 'The key name {0} does not exist'.format(name) return ret def eni_present( name, subnet_id=None, subnet_name=None, private_ip_address=None, description=None, groups=None, source_dest_check=True, allocate_eip=None, arecords=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 ENI exists. .. versionadded:: 2016.3.0 name Name tag associated with the ENI. subnet_id The VPC subnet ID the ENI will exist within. subnet_name The VPC subnet name the ENI will exist within. private_ip_address The private ip address to use for this ENI. If this is not specified AWS will automatically assign a private IP address to the ENI. Must be specified at creation time; will be ignored afterward. description Description of the key. groups A list of security groups to apply to the ENI. source_dest_check Boolean specifying whether source/destination checking is enabled on the ENI. allocate_eip allocate and associate an EIP to the ENI. Could be 'standard' to allocate Elastic IP to EC2 region or 'vpc' to get it for a particular VPC .. versionchanged:: 2016.11.0 arecords A list of arecord dicts with attributes needed for the DNS add_record state. By default the boto_route53.add_record state will be used, which requires: name, zone, ttl, and identifier. See the boto_route53 state for information about these attributes. Other DNS modules can be called by specifying the provider keyword. By default, the private ENI IP address will be used, set 'public: True' in the arecord dict to use the ENI's public IP address .. versionadded:: 2016.3.0 region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' if not salt.utils.data.exactly_one((subnet_id, subnet_name)): raise SaltInvocationError('One (but not both) of subnet_id or ' 'subnet_name must be provided.') if not groups: raise SaltInvocationError('groups is a required argument.') if not isinstance(groups, list): raise SaltInvocationError('groups must be a list.') if not isinstance(source_dest_check, bool): raise SaltInvocationError('source_dest_check must be a bool.') ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} r = __salt__['boto_ec2.get_network_interface']( name=name, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in r: ret['result'] = False ret['comment'] = 'Error when attempting to find eni: {0}.'.format( r['error']['message'] ) return ret if not r['result']: if __opts__['test']: ret['comment'] = 'ENI is set to be created.' if allocate_eip: ret['comment'] = ' '.join([ret['comment'], 'An EIP is set to be allocated/assocaited to the ENI.']) if arecords: ret['comment'] = ' '.join([ret['comment'], 'A records are set to be created.']) ret['result'] = None return ret result_create = __salt__['boto_ec2.create_network_interface']( name, subnet_id=subnet_id, subnet_name=subnet_name, private_ip_address=private_ip_address, description=description, groups=groups, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_create: ret['result'] = False ret['comment'] = 'Failed to create ENI: {0}'.format( result_create['error']['message'] ) return ret r['result'] = result_create['result'] ret['comment'] = 'Created ENI {0}'.format(name) ret['changes']['id'] = r['result']['id'] else: _ret = _eni_attribute( r['result'], 'description', description, region, key, keyid, profile ) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = _ret['comment'] if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _eni_groups( r['result'], groups, region, key, keyid, profile ) ret['changes'] = 
dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret # Actions that need to occur whether creating or updating _ret = _eni_attribute( r['result'], 'source_dest_check', source_dest_check, region, key, keyid, profile ) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] return ret if allocate_eip: if 'allocationId' not in r['result']: if __opts__['test']: ret['comment'] = ' '.join([ret['comment'], 'An EIP is set to be allocated and assocaited to the ENI.']) else: domain = 'vpc' if allocate_eip == 'vpc' else None eip_alloc = __salt__['boto_ec2.allocate_eip_address'](domain=domain, region=region, key=key, keyid=keyid, profile=profile) if eip_alloc: _ret = __salt__['boto_ec2.associate_eip_address'](instance_id=None, instance_name=None, public_ip=None, allocation_id=eip_alloc['allocation_id'], network_interface_id=r['result']['id'], private_ip_address=None, allow_reassociation=False, region=region, key=key, keyid=keyid, profile=profile) if not _ret: _ret = __salt__['boto_ec2.release_eip_address'](public_ip=None, allocation_id=eip_alloc['allocation_id'], region=region, key=key, keyid=keyid, profile=profile) ret['result'] = False msg = 'Failed to assocaite the allocated EIP address with the ENI. The EIP {0}'.format('was successfully released.' if _ret else 'was NOT RELEASED.') ret['comment'] = ' '.join([ret['comment'], msg]) return ret else: ret['result'] = False ret['comment'] = ' '.join([ret['comment'], 'Failed to allocate an EIP address']) return ret else: ret['comment'] = ' '.join([ret['comment'], 'An EIP is already allocated/assocaited to the ENI']) if arecords: for arecord in arecords: if 'name' not in arecord: msg = 'The arecord must contain a "name" property.' 
raise SaltInvocationError(msg) log.debug('processing arecord %s', arecord) _ret = None dns_provider = 'boto_route53' arecord['record_type'] = 'A' public_ip_arecord = False if 'public' in arecord: public_ip_arecord = arecord.pop('public') if public_ip_arecord: if 'publicIp' in r['result']: arecord['value'] = r['result']['publicIp'] elif 'public_ip' in eip_alloc: arecord['value'] = eip_alloc['public_ip'] else: msg = 'Unable to add an A record for the public IP address, a public IP address does not seem to be allocated to this ENI.' raise CommandExecutionError(msg) else: arecord['value'] = r['result']['private_ip_address'] if 'provider' in arecord: dns_provider = arecord.pop('provider') if dns_provider == 'boto_route53': if 'profile' not in arecord: arecord['profile'] = profile if 'key' not in arecord: arecord['key'] = key if 'keyid' not in arecord: arecord['keyid'] = keyid if 'region' not in arecord: arecord['region'] = region _ret = __states__['.'.join([dns_provider, 'present'])](**arecord) log.debug('ret from dns_provider.present = %s', _ret) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret return ret def _eni_attribute(metadata, attr, value, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} if metadata[attr] == value: return ret if __opts__['test']: ret['comment'] = 'ENI set to have {0} updated.'.format(attr) ret['result'] = None return ret result_update = __salt__['boto_ec2.modify_network_interface_attribute']( network_interface_id=metadata['id'], attr=attr, value=value, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_update: msg = 'Failed to update ENI {0}: {1}.' 
ret['result'] = False ret['comment'] = msg.format(attr, result_update['error']['message']) else: ret['comment'] = 'Updated ENI {0}.'.format(attr) ret['changes'][attr] = { 'old': metadata[attr], 'new': value } return ret def _eni_groups(metadata, groups, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} group_ids = [g['id'] for g in metadata['groups']] group_ids.sort() _groups = __salt__['boto_secgroup.convert_to_group_ids']( groups, vpc_id=metadata['vpc_id'], region=region, key=key, keyid=keyid, profile=profile ) if not _groups: ret['comment'] = 'Could not find secgroup ids for provided groups.' ret['result'] = False _groups.sort() if group_ids == _groups: return ret if __opts__['test']: ret['comment'] = 'ENI set to have groups updated.' ret['result'] = None return ret result_update = __salt__['boto_ec2.modify_network_interface_attribute']( network_interface_id=metadata['id'], attr='groups', value=_groups, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_update: msg = 'Failed to update ENI groups: {1}.' ret['result'] = False ret['comment'] = msg.format(result_update['error']['message']) else: ret['comment'] = 'Updated ENI groups.' ret['changes']['groups'] = { 'old': group_ids, 'new': _groups } return ret def eni_absent( name, release_eip=False, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 ENI is absent. .. versionadded:: 2016.3.0 name Name tag associated with the ENI. release_eip True/False - release any EIP associated with the ENI region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} r = __salt__['boto_ec2.get_network_interface']( name=name, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in r: ret['result'] = False ret['comment'] = 'Error when attempting to find eni: {0}.'.format( r['error']['message'] ) return ret if not r['result']: if __opts__['test']: ret['comment'] = 'ENI is set to be deleted.' ret['result'] = None return ret else: if __opts__['test']: ret['comment'] = 'ENI is set to be deleted.' if release_eip and 'allocationId' in r['result']: ret['comment'] = ' '.join([ret['comment'], 'Allocated/associated EIP is set to be released']) ret['result'] = None return ret if 'id' in r['result']['attachment']: result_detach = __salt__['boto_ec2.detach_network_interface']( name=name, force=True, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_detach: ret['result'] = False ret['comment'] = 'Failed to detach ENI: {0}'.format( result_detach['error']['message'] ) return ret # TODO: Ensure the detach occurs before continuing result_delete = __salt__['boto_ec2.delete_network_interface']( name=name, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_delete: ret['result'] = False ret['comment'] = 'Failed to delete ENI: {0}'.format( result_delete['error']['message'] ) return ret ret['comment'] = 'Deleted ENI {0}'.format(name) ret['changes']['id'] = None if release_eip and 'allocationId' in r['result']: _ret = __salt__['boto_ec2.release_eip_address'](public_ip=None, allocation_id=r['result']['allocationId'], region=region, key=key, keyid=keyid, profile=profile) if not _ret: ret['comment'] = ' '.join([ret['comment'], 'Failed to release EIP allocated to the ENI.']) ret['result'] = False return ret else: ret['comment'] = ' '.join([ret['comment'], 'EIP released.']) ret['changes']['eip released'] = True return ret def snapshot_created(name, ami_name, instance_name, wait_until_available=True, wait_timeout_seconds=300, 
**kwargs): ''' Create a snapshot from the given instance .. versionadded:: 2016.3.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } if not __salt__['boto_ec2.create_image'](ami_name=ami_name, instance_name=instance_name, **kwargs): ret['comment'] = 'Failed to create new AMI {ami_name}'.format(ami_name=ami_name) ret['result'] = False return ret ret['comment'] = 'Created new AMI {ami_name}'.format(ami_name=ami_name) ret['changes']['new'] = {ami_name: ami_name} if not wait_until_available: return ret starttime = time() while True: images = __salt__['boto_ec2.find_images'](ami_name=ami_name, return_objs=True, **kwargs) if images and images[0].state == 'available': break if time() - starttime > wait_timeout_seconds: if images: ret['comment'] = 'AMI still in state {state} after timeout'.format(state=images[0].state) else: ret['comment'] = 'AMI with name {ami_name} not found after timeout.'.format(ami_name=ami_name) ret['result'] = False return ret sleep(5) return ret def instance_present(name, instance_name=None, instance_id=None, image_id=None, image_name=None, tags=None, key_name=None, security_groups=None, user_data=None, instance_type=None, placement=None, kernel_id=None, ramdisk_id=None, vpc_id=None, vpc_name=None, monitoring_enabled=None, subnet_id=None, subnet_name=None, private_ip_address=None, block_device_map=None, disable_api_termination=None, instance_initiated_shutdown_behavior=None, placement_group=None, client_token=None, security_group_ids=None, security_group_names=None, additional_info=None, tenancy=None, instance_profile_arn=None, instance_profile_name=None, ebs_optimized=None, network_interfaces=None, network_interface_name=None, network_interface_id=None, attributes=None, target_state=None, public_ip=None, allocation_id=None, allocate_eip=False, region=None, key=None, keyid=None, profile=None): ### TODO - implement 'target_state={running, stopped}' ''' Ensure an EC2 instance is running with the given attributes and state. 
name (string) - The name of the state definition. Recommended that this match the instance_name attribute (generally the FQDN of the instance). instance_name (string) - The name of the instance, generally its FQDN. Exclusive with 'instance_id'. instance_id (string) - The ID of the instance (if known). Exclusive with 'instance_name'. image_id (string) – The ID of the AMI image to run. image_name (string) – The name of the AMI image to run. tags (dict) - Tags to apply to the instance. key_name (string) – The name of the key pair with which to launch instances. security_groups (list of strings) – The names of the EC2 classic security groups with which to associate instances user_data (string) – The Base64-encoded MIME user data to be made available to the instance(s) in this reservation. instance_type (string) – The EC2 instance size/type. Note that only certain types are compatible with HVM based AMIs. placement (string) – The Availability Zone to launch the instance into. kernel_id (string) – The ID of the kernel with which to launch the instances. ramdisk_id (string) – The ID of the RAM disk with which to launch the instances. vpc_id (string) - The ID of a VPC to attach the instance to. vpc_name (string) - The name of a VPC to attach the instance to. monitoring_enabled (bool) – Enable detailed CloudWatch monitoring on the instance. subnet_id (string) – The ID of the subnet within which to launch the instances for VPC. subnet_name (string) – The name of the subnet within which to launch the instances for VPC. private_ip_address (string) – If you’re using VPC, you can optionally use this parameter to assign the instance a specific available IP address from the subnet (e.g., 10.0.0.25). block_device_map (boto.ec2.blockdevicemapping.BlockDeviceMapping) – A BlockDeviceMapping data structure describing the EBS volumes associated with the Image. disable_api_termination (bool) – If True, the instances will be locked and will not be able to be terminated via the API. 
instance_initiated_shutdown_behavior (string) – Specifies whether the instance stops or terminates on instance-initiated shutdown. Valid values are: - 'stop' - 'terminate' placement_group (string) – If specified, this is the name of the placement group in which the instance(s) will be launched. client_token (string) – Unique, case-sensitive identifier you provide to ensure idempotency of the request. Maximum 64 ASCII characters. security_group_ids (list of strings) – The IDs of the VPC security groups with which to associate instances. security_group_names (list of strings) – The names of the VPC security groups with which to associate instances. additional_info (string) – Specifies additional information to make available to the instance(s). tenancy (string) – The tenancy of the instance you want to launch. An instance with a tenancy of ‘dedicated’ runs on single-tenant hardware and can only be launched into a VPC. Valid values are:”default” or “dedicated”. NOTE: To use dedicated tenancy you MUST specify a VPC subnet-ID as well. instance_profile_arn (string) – The Amazon resource name (ARN) of the IAM Instance Profile (IIP) to associate with the instances. instance_profile_name (string) – The name of the IAM Instance Profile (IIP) to associate with the instances. ebs_optimized (bool) – Whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and a tuned configuration stack to provide optimal EBS I/O performance. This optimization isn’t available with all instance types. network_interfaces (boto.ec2.networkinterface.NetworkInterfaceCollection) – A NetworkInterfaceCollection data structure containing the ENI specifications for the instance. network_interface_name (string) - The name of Elastic Network Interface to attach .. versionadded:: 2016.11.0 network_interface_id (string) - The id of Elastic Network Interface to attach .. 
versionadded:: 2016.11.0 attributes (dict) - Instance attributes and value to be applied to the instance. Available options are: - instanceType - A valid instance type (m1.small) - kernel - Kernel ID (None) - ramdisk - Ramdisk ID (None) - userData - Base64 encoded String (None) - disableApiTermination - Boolean (true) - instanceInitiatedShutdownBehavior - stop|terminate - blockDeviceMapping - List of strings - ie: [‘/dev/sda=false’] - sourceDestCheck - Boolean (true) - groupSet - Set of Security Groups or IDs - ebsOptimized - Boolean (false) - sriovNetSupport - String - ie: ‘simple’ target_state (string) - The desired target state of the instance. Available options are: - running - stopped Note that this option is currently UNIMPLEMENTED. public_ip: (string) - The IP of a previously allocated EIP address, which will be attached to the instance. EC2 Classic instances ONLY - for VCP pass in an allocation_id instead. allocation_id: (string) - The ID of a previously allocated EIP address, which will be attached to the instance. VPC instances ONLY - for Classic pass in a public_ip instead. allocate_eip: (bool) - Allocate and attach an EIP on-the-fly for this instance. Note you'll want to releaase this address when terminating the instance, either manually or via the 'release_eip' flag to 'instance_absent'. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. .. 
versionadded:: 2016.3.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } _create = False running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') changed_attrs = {} if not salt.utils.data.exactly_one((image_id, image_name)): raise SaltInvocationError('Exactly one of image_id OR ' 'image_name must be provided.') if (public_ip or allocation_id or allocate_eip) and not salt.utils.data.exactly_one((public_ip, allocation_id, allocate_eip)): raise SaltInvocationError('At most one of public_ip, allocation_id OR ' 'allocate_eip may be provided.') if instance_id: exists = __salt__['boto_ec2.exists'](instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) if not exists: _create = True else: instances = __salt__['boto_ec2.find_instances'](name=instance_name if instance_name else name, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) if not instances: _create = True elif len(instances) > 1: log.debug('Multiple instances matching criteria found - cannot determine a singular instance-id') instance_id = None # No way to know, we'll just have to bail later.... 
else: instance_id = instances[0] if _create: if __opts__['test']: ret['comment'] = 'The instance {0} is set to be created.'.format(name) ret['result'] = None return ret if image_name: args = {'ami_name': image_name, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile} image_ids = __salt__['boto_ec2.find_images'](**args) if image_ids: image_id = image_ids[0] else: image_id = image_name r = __salt__['boto_ec2.run'](image_id, instance_name if instance_name else name, tags=tags, key_name=key_name, security_groups=security_groups, user_data=user_data, instance_type=instance_type, placement=placement, kernel_id=kernel_id, ramdisk_id=ramdisk_id, vpc_id=vpc_id, vpc_name=vpc_name, monitoring_enabled=monitoring_enabled, subnet_id=subnet_id, subnet_name=subnet_name, private_ip_address=private_ip_address, block_device_map=block_device_map, disable_api_termination=disable_api_termination, instance_initiated_shutdown_behavior=instance_initiated_shutdown_behavior, placement_group=placement_group, client_token=client_token, security_group_ids=security_group_ids, security_group_names=security_group_names, additional_info=additional_info, tenancy=tenancy, instance_profile_arn=instance_profile_arn, instance_profile_name=instance_profile_name, ebs_optimized=ebs_optimized, network_interfaces=network_interfaces, network_interface_name=network_interface_name, network_interface_id=network_interface_id, region=region, key=key, keyid=keyid, profile=profile) if not r or 'instance_id' not in r: ret['result'] = False ret['comment'] = 'Failed to create instance {0}.'.format(instance_name if instance_name else name) return ret instance_id = r['instance_id'] ret['changes'] = {'old': {}, 'new': {}} ret['changes']['old']['instance_id'] = None ret['changes']['new']['instance_id'] = instance_id # To avoid issues we only allocate new EIPs at instance creation. 
# This might miss situations where an instance is initially created # created without and one is added later, but the alternative is the # risk of EIPs allocated at every state run. if allocate_eip: if __opts__['test']: ret['comment'] = 'New EIP would be allocated.' ret['result'] = None return ret domain = 'vpc' if vpc_id or vpc_name else None r = __salt__['boto_ec2.allocate_eip_address']( domain=domain, region=region, key=key, keyid=keyid, profile=profile) if not r: ret['result'] = False ret['comment'] = 'Failed to allocate new EIP.' return ret allocation_id = r['allocation_id'] log.info("New EIP with address %s allocated.", r['public_ip']) else: log.info("EIP not requested.") if public_ip or allocation_id: # This can take a bit to show up, give it a chance to... tries = 10 secs = 3 for t in range(tries): r = __salt__['boto_ec2.get_eip_address_info']( addresses=public_ip, allocation_ids=allocation_id, region=region, key=key, keyid=keyid, profile=profile) if r: break else: log.info( 'Waiting up to %s secs for new EIP %s to become available', tries * secs, public_ip or allocation_id ) time.sleep(secs) if not r: ret['result'] = False ret['comment'] = 'Failed to lookup EIP {0}.'.format(public_ip or allocation_id) return ret ip = r[0]['public_ip'] if r[0].get('instance_id'): if r[0]['instance_id'] != instance_id: ret['result'] = False ret['comment'] = ('EIP {0} is already associated with instance ' '{1}.'.format(public_ip if public_ip else allocation_id, r[0]['instance_id'])) return ret else: if __opts__['test']: ret['comment'] = 'Instance {0} to be updated.'.format(name) ret['result'] = None return ret r = __salt__['boto_ec2.associate_eip_address']( instance_id=instance_id, public_ip=public_ip, allocation_id=allocation_id, region=region, key=key, keyid=keyid, profile=profile) if r: if 'new' not in ret['changes']: ret['changes']['new'] = {} ret['changes']['new']['public_ip'] = ip else: ret['result'] = False ret['comment'] = 'Failed to attach EIP to instance 
{0}.'.format( instance_name if instance_name else name) return ret if attributes: for k, v in six.iteritems(attributes): curr = __salt__['boto_ec2.get_attribute'](k, instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile) curr = {} if not isinstance(curr, dict) else curr if curr.get(k) == v: continue else: if __opts__['test']: changed_attrs[k] = 'The instance attribute {0} is set to be changed from \'{1}\' to \'{2}\'.'.format( k, curr.get(k), v) continue try: r = __salt__['boto_ec2.set_attribute'](attribute=k, attribute_value=v, instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile) except SaltInvocationError as e: ret['result'] = False ret['comment'] = 'Failed to set attribute {0} to {1} on instance {2}.'.format(k, v, instance_name) return ret ret['changes'] = ret['changes'] if ret['changes'] else {'old': {}, 'new': {}} ret['changes']['old'][k] = curr.get(k) ret['changes']['new'][k] = v if __opts__['test']: if changed_attrs: ret['changes']['new'] = changed_attrs ret['result'] = None else: ret['comment'] = 'Instance {0} is in the correct state'.format(instance_name if instance_name else name) ret['result'] = True if tags and instance_id is not None: tags = dict(tags) curr_tags = dict(__salt__['boto_ec2.get_all_tags'](filters={'resource-id': instance_id}, region=region, key=key, keyid=keyid, profile=profile).get(instance_id, {})) current = set(curr_tags.keys()) desired = set(tags.keys()) remove = list(current - desired) # Boto explicitly requires a list here and can't cope with a set... add = dict([(t, tags[t]) for t in desired - current]) replace = dict([(t, tags[t]) for t in tags if tags.get(t) != curr_tags.get(t)]) # Tag keys are unique despite the bizarre semantics uses which make it LOOK like they could be duplicative. 
add.update(replace) if add or remove: if __opts__['test']: ret['changes']['old'] = ret['changes']['old'] if 'old' in ret['changes'] else {} ret['changes']['new'] = ret['changes']['new'] if 'new' in ret['changes'] else {} ret['changes']['old']['tags'] = curr_tags ret['changes']['new']['tags'] = tags ret['comment'] += ' Tags would be updated on instance {0}.'.format(instance_name if instance_name else name) else: if remove: if not __salt__['boto_ec2.delete_tags'](resource_ids=instance_id, tags=remove, region=region, key=key, keyid=keyid, profile=profile): msg = "Error while deleting tags on instance {0}".format(instance_name if instance_name else name) log.error(msg) ret['comment'] += ' ' + msg ret['result'] = False return ret if add: if not __salt__['boto_ec2.create_tags'](resource_ids=instance_id, tags=add, region=region, key=key, keyid=keyid, profile=profile): msg = "Error while creating tags on instance {0}".format(instance_name if instance_name else name) log.error(msg) ret['comment'] += ' ' + msg ret['result'] = False return ret ret['changes']['old'] = ret['changes']['old'] if 'old' in ret['changes'] else {} ret['changes']['new'] = ret['changes']['new'] if 'new' in ret['changes'] else {} ret['changes']['old']['tags'] = curr_tags ret['changes']['new']['tags'] = tags return ret def instance_absent(name, instance_name=None, instance_id=None, release_eip=False, region=None, key=None, keyid=None, profile=None, filters=None): ''' Ensure an EC2 instance does not exist (is stopped and removed). .. versionchanged:: 2016.11.0 name (string) - The name of the state definition. instance_name (string) - The name of the instance. instance_id (string) - The ID of the instance. release_eip (bool) - Release any associated EIPs during termination. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. 
profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. filters (dict) - A dict of additional filters to use in matching the instance to delete. YAML example fragment: .. code-block:: yaml - filters: vpc-id: vpc-abcdef12 ''' ### TODO - Implement 'force' option?? Would automagically turn off ### 'disableApiTermination', as needed, before trying to delete. ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') if not instance_id: try: instance_id = __salt__['boto_ec2.get_id'](name=instance_name if instance_name else name, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states, filters=filters) except CommandExecutionError as e: ret['result'] = None ret['comment'] = ("Couldn't determine current status of instance " "{0}.".format(instance_name or name)) return ret instances = __salt__['boto_ec2.find_instances'](instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile, return_objs=True, filters=filters) if not instances: ret['result'] = True ret['comment'] = 'Instance {0} is already gone.'.format(instance_id) return ret instance = instances[0] ### Honor 'disableApiTermination' - if you want to override it, first use set_attribute() to turn it off no_can_do = __salt__['boto_ec2.get_attribute']('disableApiTermination', instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile) if no_can_do.get('disableApiTermination') is True: ret['result'] = False ret['comment'] = 'Termination of instance {0} via the API is disabled.'.format(instance_id) return ret if __opts__['test']: ret['comment'] = 'The instance {0} is set to be deleted.'.format(name) ret['result'] = None return ret r = __salt__['boto_ec2.terminate'](instance_id=instance_id, name=instance_name, region=region, key=key, keyid=keyid, profile=profile) if not r: ret['result'] = False ret['comment'] 
= 'Failed to terminate instance {0}.'.format(instance_id) return ret ret['changes']['old'] = {'instance_id': instance_id} ret['changes']['new'] = None if release_eip: ip = getattr(instance, 'ip_address', None) if ip: base_args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile} public_ip = None alloc_id = None assoc_id = None if getattr(instance, 'vpc_id', None): r = __salt__['boto_ec2.get_eip_address_info'](addresses=ip, **base_args) if r and 'allocation_id' in r[0]: alloc_id = r[0]['allocation_id'] assoc_id = r[0].get('association_id') else: # I /believe/ this situation is impossible but let's hedge our bets... ret['result'] = False ret['comment'] = "Can't determine AllocationId for address {0}.".format(ip) return ret else: public_ip = instance.ip_address if assoc_id: # Race here - sometimes the terminate above will already have dropped this if not __salt__['boto_ec2.disassociate_eip_address'](association_id=assoc_id, **base_args): log.warning("Failed to disassociate EIP %s.", ip) if __salt__['boto_ec2.release_eip_address'](allocation_id=alloc_id, public_ip=public_ip, **base_args): log.info("Released EIP address %s", public_ip or r[0]['public_ip']) ret['changes']['old']['public_ip'] = public_ip or r[0]['public_ip'] else: ret['result'] = False ret['comment'] = "Failed to release EIP {0}.".format(ip) return ret return ret def volume_absent(name, volume_name=None, volume_id=None, instance_name=None, instance_id=None, device=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 volume is detached and absent. .. versionadded:: 2016.11.0 name State definition name. volume_name Name tag associated with the volume. For safety, if this matches more than one volume, the state will refuse to apply. volume_id Resource ID of the volume. instance_name Only remove volume if it is attached to instance with this Name tag. Exclusive with 'instance_id'. Requires 'device'. instance_id Only remove volume if it is attached to this instance. 
Exclusive with 'instance_name'. Requires 'device'. device Match by device rather than ID. Requires one of 'instance_name' or 'instance_id'. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } filters = {} running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') if not salt.utils.data.exactly_one((volume_name, volume_id, instance_name, instance_id)): raise SaltInvocationError("Exactly one of 'volume_name', 'volume_id', " "'instance_name', or 'instance_id' must be provided.") if (instance_name or instance_id) and not device: raise SaltInvocationError("Parameter 'device' is required when either " "'instance_name' or 'instance_id' is specified.") if volume_id: filters.update({'volume-id': volume_id}) if volume_name: filters.update({'tag:Name': volume_name}) if instance_name: instance_id = __salt__['boto_ec2.get_id']( name=instance_name, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) if not instance_id: ret['comment'] = ('Instance with Name {0} not found. 
Assuming ' 'associated volumes gone.'.format(instance_name)) return ret if instance_id: filters.update({'attachment.instance-id': instance_id}) if device: filters.update({'attachment.device': device}) args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile} vols = __salt__['boto_ec2.get_all_volumes'](filters=filters, **args) if not vols: ret['comment'] = 'Volume matching criteria not found, assuming already absent' return ret if len(vols) > 1: msg = "More than one volume matched criteria, can't continue in state {0}".format(name) log.error(msg) ret['comment'] = msg ret['result'] = False return ret vol = vols[0] log.info('Matched Volume ID %s', vol) if __opts__['test']: ret['comment'] = 'The volume {0} is set to be deleted.'.format(vol) ret['result'] = None return ret if __salt__['boto_ec2.delete_volume'](volume_id=vol, force=True, **args): ret['comment'] = 'Volume {0} deleted.'.format(vol) ret['changes'] = {'old': {'volume_id': vol}, 'new': {'volume_id': None}} else: ret['comment'] = 'Error deleting volume {0}.'.format(vol) ret['result'] = False return ret def volume_present(name, volume_name=None, volume_id=None, instance_name=None, instance_id=None, device=None, size=None, snapshot_id=None, volume_type=None, iops=None, encrypted=False, kms_key_id=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 volume is present and attached. .. name State definition name. volume_name The Name tag value for the volume. If no volume with that matching name tag is found, a new volume will be created. If multiple volumes are matched, the state will fail. volume_id Resource ID of the volume. Exclusive with 'volume_name'. instance_name Attach volume to instance with this Name tag. Exclusive with 'instance_id'. instance_id Attach volume to instance with this ID. Exclusive with 'instance_name'. device The device on the instance through which the volume is exposed (e.g. /dev/sdh) size The size of the new volume, in GiB. 
If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. snapshot_id The snapshot ID from which the new Volume will be created. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. volume_type The type of the volume. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. Valid volume types for AWS can be found here: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html iops The provisioned IOPS you want to associate with this volume. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. encrypted Specifies whether the volume should be encrypted. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. kms_key_id If encrypted is True, this KMS Key ID may be specified to encrypt volume with this key. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. e.g.: arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} old_dict = {} new_dict = {} running_states = ('running', 'stopped') if not salt.utils.data.exactly_one((volume_name, volume_id)): raise SaltInvocationError("Exactly one of 'volume_name', 'volume_id', " " must be provided.") if not salt.utils.data.exactly_one((instance_name, instance_id)): raise SaltInvocationError("Exactly one of 'instance_name', or 'instance_id'" " must be provided.") if device is None: raise SaltInvocationError("Parameter 'device' is required.") args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile} if instance_name: instance_id = __salt__['boto_ec2.get_id']( name=instance_name, in_states=running_states, **args) if not instance_id: raise SaltInvocationError('Instance with Name {0} not found.'.format(instance_name)) instances = __salt__['boto_ec2.find_instances'](instance_id=instance_id, return_objs=True, **args) instance = instances[0] if volume_name: filters = {} filters.update({'tag:Name': volume_name}) vols = __salt__['boto_ec2.get_all_volumes'](filters=filters, **args) if len(vols) > 1: msg = "More than one volume matched volume name {0}, can't continue in state {1}".format(volume_name, name) raise SaltInvocationError(msg) if not vols: if __opts__['test']: ret['comment'] = ('The volume with name {0} is set to be created and attached' ' on {1}({2}).'.format(volume_name, instance_id, device)) ret['result'] = None return ret _rt = __salt__['boto_ec2.create_volume'](zone_name=instance.placement, size=size, snapshot_id=snapshot_id, volume_type=volume_type, iops=iops, encrypted=encrypted, kms_key_id=kms_key_id, wait_for_creation=True, **args) if 'result' in _rt: volume_id = _rt['result'] else: raise SaltInvocationError('Error creating volume with name {0}.'.format(volume_name)) _rt = __salt__['boto_ec2.set_volumes_tags'](tag_maps=[{ 'filters': {'volume_ids': [volume_id]}, 'tags': {'Name': volume_name} }], **args) if _rt['success'] is False: raise 
SaltInvocationError('Error updating requested volume ' '{0} with name {1}. {2}'.format(volume_id, volume_name, _rt['comment'])) old_dict['volume_id'] = None new_dict['volume_id'] = volume_id else: volume_id = vols[0] vols = __salt__['boto_ec2.get_all_volumes'](volume_ids=[volume_id], return_objs=True, **args) if not vols: raise SaltInvocationError('Volume {0} do not exist'.format(volume_id)) vol = vols[0] if vol.zone != instance.placement: raise SaltInvocationError(('Volume {0} in {1} cannot attach to instance' ' {2} in {3}.').format(volume_id, vol.zone, instance_id, instance.placement)) attach_data = vol.attach_data if attach_data is not None and attach_data.instance_id is not None: if instance_id == attach_data.instance_id and device == attach_data.device: ret['comment'] = 'The volume {0} is attached on {1}({2}).'.format(volume_id, instance_id, device) return ret else: if __opts__['test']: ret['comment'] = ('The volume {0} is set to be detached' ' from {1}({2} and attached on {3}({4}).').format(attach_data.instance_id, attach_data.devic, volume_id, instance_id, device) ret['result'] = None return ret if __salt__['boto_ec2.detach_volume'](volume_id=volume_id, wait_for_detachement=True, **args): ret['comment'] = 'Volume {0} is detached from {1}({2}).'.format(volume_id, attach_data.instance_id, attach_data.device) old_dict['instance_id'] = attach_data.instance_id old_dict['device'] = attach_data.device else: raise SaltInvocationError(('The volume {0} is already attached on instance {1}({2}).' 
' Failed to detach').format(volume_id, attach_data.instance_id, attach_data.device)) else: old_dict['instance_id'] = instance_id old_dict['device'] = None if __opts__['test']: ret['comment'] = 'The volume {0} is set to be attached on {1}({2}).'.format(volume_id, instance_id, device) ret['result'] = None return ret if __salt__['boto_ec2.attach_volume'](volume_id=volume_id, instance_id=instance_id, device=device, **args): ret['comment'] = ' '.join([ ret['comment'], 'Volume {0} is attached on {1}({2}).'.format(volume_id, instance_id, device)]) new_dict['instance_id'] = instance_id new_dict['device'] = device ret['changes'] = {'old': old_dict, 'new': new_dict} else: ret['comment'] = 'Error attaching volume {0} to instance {1}({2}).'.format(volume_id, instance_id, device) ret['result'] = False return ret def private_ips_present(name, network_interface_name=None, network_interface_id=None, private_ip_addresses=None, allow_reassignment=False, region=None, key=None, keyid=None, profile=None): ''' Ensure an ENI has secondary private ip addresses associated with it name (String) - State definition name network_interface_id (String) - The EC2 network interface id, example eni-123456789 private_ip_addresses (List or String) - The secondary private ip address(es) that should be present on the ENI. allow_reassignment (Boolean) - If true, will reassign a secondary private ip address associated with another ENI. If false, state will fail if the secondary private ip address is associated with another ENI. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
    '''
    if not salt.utils.data.exactly_one((network_interface_name, network_interface_id)):
        raise SaltInvocationError("Exactly one of 'network_interface_name', "
                                  "'network_interface_id' must be provided")

    if not private_ip_addresses:
        raise SaltInvocationError("You must provide the private_ip_addresses to associate with the "
                                  "ENI")

    # Standard Salt state return dict; 'old'/'new' track the ENI's secondary
    # private IP lists before and after the change.
    ret = {
        'name': name,
        'result': True,
        'comment': '',
        'changes': {'old': [], 'new': []}
    }

    get_eni_args = {
        'name': network_interface_name,
        'network_interface_id': network_interface_id,
        'region': region,
        'key': key,
        'keyid': keyid,
        'profile': profile
    }

    eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args)

    # Check if there are any new secondary private ips to add to the eni
    if eni and eni.get('result', {}).get('private_ip_addresses'):
        # Record the currently-assigned private IPs (primary included) as 'old'.
        for eni_pip in eni['result']['private_ip_addresses']:
            ret['changes']['old'].append(eni_pip['private_ip_address'])

        # Only the requested IPs not already on the ENI need assigning.
        ips_to_add = []
        for private_ip in private_ip_addresses:
            if private_ip not in ret['changes']['old']:
                ips_to_add.append(private_ip)

        if ips_to_add:
            if not __opts__['test']:
                # Assign secondary private ips to ENI
                assign_ips_args = {
                    'network_interface_id': network_interface_id,
                    'private_ip_addresses': ips_to_add,
                    'allow_reassignment': allow_reassignment,
                    'region': region,
                    'key': key,
                    'keyid': keyid,
                    'profile': profile
                }

                __salt__['boto_ec2.assign_private_ip_addresses'](**assign_ips_args)

                # Verify secondary private ips were properly assigned to ENI
                # by re-reading the ENI rather than trusting the call's return.
                eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args)
                if eni and eni.get('result', {}).get('private_ip_addresses', None):
                    for eni_pip in eni['result']['private_ip_addresses']:
                        ret['changes']['new'].append(eni_pip['private_ip_address'])

                ips_not_added = []
                for private_ip in private_ip_addresses:
                    if private_ip not in ret['changes']['new']:
                        ips_not_added.append(private_ip)

                # Display results
                if ips_not_added:
                    ret['result'] = False
                    ret['comment'] = ('ips on eni: {0}\n'
                                      'attempted to add: {1}\n'
                                      'could not add the following ips: {2}\n'.format(
                                          '\n\t- ' + '\n\t- '.join(ret['changes']['new']),
                                          '\n\t- ' + '\n\t- '.join(ips_to_add),
                                          '\n\t- ' + '\n\t- '.join(ips_not_added)))
                else:
                    ret['comment'] = "added ips: {0}".format(
                        '\n\t- ' + '\n\t- '.join(ips_to_add))

                # Verify there were changes
                if ret['changes']['old'] == ret['changes']['new']:
                    ret['changes'] = {}

            else:
                # Testing mode, show that there were ips to add
                ret['comment'] = ('ips on eni: {0}\n'
                                  'ips that would be added: {1}\n'.format(
                                      '\n\t- ' + '\n\t- '.join(ret['changes']['old']),
                                      '\n\t- ' + '\n\t- '.join(ips_to_add)))
                ret['changes'] = {}
                ret['result'] = None

        else:
            ret['comment'] = 'ips on eni: {0}'.format(
                '\n\t- ' + '\n\t- '.join(ret['changes']['old']))

            # there were no changes since we did not attempt to remove ips
            ret['changes'] = {}

    return ret


def private_ips_absent(name, network_interface_name=None, network_interface_id=None,
                       private_ip_addresses=None, region=None, key=None, keyid=None, profile=None):
    '''
    Ensure an ENI does not have secondary private ip addresses associated with it

    name
        (String) - State definition name
    network_interface_id
        (String) - The EC2 network interface id, example eni-123456789
    private_ip_addresses
        (List or String) - The secondary private ip address(es) that should be
        absent on the ENI.
    region
        (string) - Region to connect to.
    key
        (string) - Secret key to be used.
    keyid
        (string) - Access key to be used.
    profile
        (variable) - A dict with region, key and keyid, or a pillar key
        (string) that contains a dict with region, key and keyid.
    '''
    if not salt.utils.data.exactly_one((network_interface_name, network_interface_id)):
        raise SaltInvocationError("Exactly one of 'network_interface_name', "
                                  "'network_interface_id' must be provided")

    if not private_ip_addresses:
        raise SaltInvocationError("You must provide the private_ip_addresses to unassociate with "
                                  "the ENI")
    # A single address may be passed as a bare string; normalize to a list.
    if not isinstance(private_ip_addresses, list):
        private_ip_addresses = [private_ip_addresses]

    # Standard Salt state return dict; 'old'/'new' track the ENI's private IP
    # lists before and after the change.
    ret = {
        'name': name,
        'result': True,
        'comment': '',
        'changes': {'new': [], 'old': []}
    }

    get_eni_args = {
        'name': network_interface_name,
        'network_interface_id': network_interface_id,
        'region': region,
        'key': key,
        'keyid': keyid,
        'profile': profile
    }

    eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args)

    # Check if there are any old private ips to remove from the eni
    primary_private_ip = None
    if eni and eni.get('result', {}).get('private_ip_addresses'):
        for eni_pip in eni['result']['private_ip_addresses']:
            ret['changes']['old'].append(eni_pip['private_ip_address'])
            if eni_pip['primary']:
                primary_private_ip = eni_pip['private_ip_address']

        ips_to_remove = []
        for private_ip in private_ip_addresses:
            if private_ip in ret['changes']['old']:
                ips_to_remove.append(private_ip)
            if private_ip == primary_private_ip:
                # The primary private IP of an ENI can never be unassigned;
                # fail early instead of letting the API call error out.
                ret['result'] = False
                ret['comment'] = ('You cannot unassign the primary private ip address ({0}) on an '
                                  'eni\n'
                                  'ips on eni: {1}\n'
                                  'attempted to remove: {2}\n'.format(
                                      primary_private_ip,
                                      '\n\t- ' + '\n\t- '.join(ret['changes']['old']),
                                      '\n\t- ' + '\n\t- '.join(private_ip_addresses)))
                ret['changes'] = {}
                return ret

        if ips_to_remove:
            if not __opts__['test']:
                # Unassign secondary private ips to ENI
                assign_ips_args = {
                    'network_interface_id': network_interface_id,
                    'private_ip_addresses': ips_to_remove,
                    'region': region,
                    'key': key,
                    'keyid': keyid,
                    'profile': profile
                }

                __salt__['boto_ec2.unassign_private_ip_addresses'](**assign_ips_args)

                # Verify secondary private ips were properly unassigned from ENI
                # by re-reading the ENI rather than trusting the call's return.
                eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args)
                if eni and eni.get('result', {}).get('private_ip_addresses', None):
                    for eni_pip in eni['result']['private_ip_addresses']:
                        ret['changes']['new'].append(eni_pip['private_ip_address'])

                ips_not_removed = []
                for private_ip in private_ip_addresses:
                    if private_ip in ret['changes']['new']:
                        ips_not_removed.append(private_ip)

                if ips_not_removed:
                    ret['result'] = False
                    ret['comment'] = ('ips on eni: {0}\n'
                                      'attempted to remove: {1}\n'
                                      'could not remove the following ips: {2}\n'.format(
                                          '\n\t- ' + '\n\t- '.join(ret['changes']['new']),
                                          '\n\t- ' + '\n\t- '.join(ips_to_remove),
                                          '\n\t- ' + '\n\t- '.join(ips_not_removed)))
                else:
                    ret['comment'] = "removed ips: {0}".format(
                        '\n\t- ' + '\n\t- '.join(ips_to_remove))

                # Verify there were changes
                if ret['changes']['old'] == ret['changes']['new']:
                    ret['changes'] = {}

            else:
                # Testing mode, show that there were ips to remove
                ret['comment'] = ('ips on eni: {0}\n'
                                  'ips that would be removed: {1}\n'.format(
                                      '\n\t- ' + '\n\t- '.join(ret['changes']['old']),
                                      '\n\t- ' + '\n\t- '.join(ips_to_remove)))
                ret['changes'] = {}
                ret['result'] = None

        else:
            ret['comment'] = 'ips on network interface: {0}'.format(
                '\n\t- ' + '\n\t- '.join(ret['changes']['old']))

            # there were no changes since we did not attempt to remove ips
            ret['changes'] = {}

    return ret
saltstack/salt
salt/states/boto_ec2.py
volume_present
python
def volume_present(name, volume_name=None, volume_id=None, instance_name=None,
                   instance_id=None, device=None, size=None, snapshot_id=None, volume_type=None,
                   iops=None, encrypted=False, kms_key_id=None, region=None, key=None,
                   keyid=None, profile=None):
    '''
    Ensure the EC2 volume is present and attached.

    name
        State definition name.

    volume_name
        The Name tag value for the volume. If no volume with that matching name
        tag is found, a new volume will be created. If multiple volumes are
        matched, the state will fail. Exclusive with 'volume_id'.

    volume_id
        Resource ID of the volume. Exclusive with 'volume_name'.

    instance_name
        Attach volume to instance with this Name tag.
        Exclusive with 'instance_id'.

    instance_id
        Attach volume to instance with this ID.
        Exclusive with 'instance_name'.

    device
        The device on the instance through which the volume is exposed
        (e.g. /dev/sdh)

    size
        The size of the new volume, in GiB. If you're creating the volume from
        a snapshot and don't specify a volume size, the default is the snapshot
        size. Optionally specified at volume creation time; will be ignored
        afterward. Requires 'volume_name'.

    snapshot_id
        The snapshot ID from which the new Volume will be created. Optionally
        specified at volume creation time; will be ignored afterward.
        Requires 'volume_name'.

    volume_type
        The type of the volume. Optionally specified at volume creation time;
        will be ignored afterward. Requires 'volume_name'. Valid volume types
        for AWS can be found here:
        http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html

    iops
        The provisioned IOPS you want to associate with this volume. Optionally
        specified at volume creation time; will be ignored afterward.
        Requires 'volume_name'.

    encrypted
        Specifies whether the volume should be encrypted. Optionally specified
        at volume creation time; will be ignored afterward. Requires 'volume_name'.

    kms_key_id
        If encrypted is True, this KMS Key ID may be specified to encrypt
        volume with this key. Optionally specified at volume creation time;
        will be ignored afterward. Requires 'volume_name'.
        e.g.: arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}}
    old_dict = {}
    new_dict = {}
    # Volumes can only be attached to instances in one of these states.
    running_states = ('running', 'stopped')

    if not salt.utils.data.exactly_one((volume_name, volume_id)):
        raise SaltInvocationError("Exactly one of 'volume_name', 'volume_id', "
                                  " must be provided.")
    if not salt.utils.data.exactly_one((instance_name, instance_id)):
        raise SaltInvocationError("Exactly one of 'instance_name', or 'instance_id'"
                                  " must be provided.")
    if device is None:
        raise SaltInvocationError("Parameter 'device' is required.")
    args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile}
    if instance_name:
        instance_id = __salt__['boto_ec2.get_id'](
            name=instance_name, in_states=running_states, **args)
        if not instance_id:
            raise SaltInvocationError('Instance with Name {0} not found.'.format(instance_name))

    instances = __salt__['boto_ec2.find_instances'](instance_id=instance_id,
                                                    return_objs=True, **args)
    instance = instances[0]

    if volume_name:
        filters = {}
        filters.update({'tag:Name': volume_name})
        vols = __salt__['boto_ec2.get_all_volumes'](filters=filters, **args)
        if len(vols) > 1:
            msg = "More than one volume matched volume name {0}, can't continue in state {1}".format(volume_name,
                                                                                                     name)
            raise SaltInvocationError(msg)
        if not vols:
            if __opts__['test']:
                ret['comment'] = ('The volume with name {0} is set to be created and attached'
                                  ' on {1}({2}).'.format(volume_name, instance_id, device))
                ret['result'] = None
                return ret
            # Create the volume in the same AZ as the target instance so it is
            # actually attachable.
            _rt = __salt__['boto_ec2.create_volume'](zone_name=instance.placement,
                                                     size=size,
                                                     snapshot_id=snapshot_id,
                                                     volume_type=volume_type,
                                                     iops=iops,
                                                     encrypted=encrypted,
                                                     kms_key_id=kms_key_id,
                                                     wait_for_creation=True, **args)
            if 'result' in _rt:
                volume_id = _rt['result']
            else:
                raise SaltInvocationError('Error creating volume with name {0}.'.format(volume_name))
            _rt = __salt__['boto_ec2.set_volumes_tags'](tag_maps=[{
                'filters': {'volume_ids': [volume_id]},
                'tags': {'Name': volume_name}
            }], **args)
            if _rt['success'] is False:
                raise SaltInvocationError('Error updating requested volume '
                                          '{0} with name {1}. {2}'.format(volume_id,
                                                                          volume_name,
                                                                          _rt['comment']))
            old_dict['volume_id'] = None
            new_dict['volume_id'] = volume_id
        else:
            volume_id = vols[0]
    vols = __salt__['boto_ec2.get_all_volumes'](volume_ids=[volume_id],
                                                return_objs=True, **args)
    if not vols:
        raise SaltInvocationError('Volume {0} do not exist'.format(volume_id))
    vol = vols[0]
    if vol.zone != instance.placement:
        raise SaltInvocationError(('Volume {0} in {1} cannot attach to instance'
                                   ' {2} in {3}.').format(volume_id,
                                                          vol.zone,
                                                          instance_id,
                                                          instance.placement))
    attach_data = vol.attach_data
    if attach_data is not None and attach_data.instance_id is not None:
        if instance_id == attach_data.instance_id and device == attach_data.device:
            # Already attached exactly as requested - nothing to do.
            ret['comment'] = 'The volume {0} is attached on {1}({2}).'.format(
                volume_id, instance_id, device)
            return ret
        else:
            if __opts__['test']:
                # Bugfix: the original passed the format arguments in the wrong
                # order (starting with the instance id) and referenced the
                # non-existent ``attach_data.devic`` attribute, which raised
                # AttributeError in test mode. The closing paren after {2} was
                # also missing from the template.
                ret['comment'] = ('The volume {0} is set to be detached'
                                  ' from {1}({2}) and attached on {3}({4}).').format(
                                      volume_id, attach_data.instance_id,
                                      attach_data.device, instance_id, device)
                ret['result'] = None
                return ret
            # NOTE: ``wait_for_detachement`` is the (misspelled) keyword name
            # exposed by the boto_ec2 execution module; keep it as-is.
            if __salt__['boto_ec2.detach_volume'](volume_id=volume_id,
                                                  wait_for_detachement=True, **args):
                ret['comment'] = 'Volume {0} is detached from {1}({2}).'.format(
                    volume_id, attach_data.instance_id, attach_data.device)
                old_dict['instance_id'] = attach_data.instance_id
                old_dict['device'] = attach_data.device
            else:
                raise SaltInvocationError(('The volume {0} is already attached on instance {1}({2}).'
                                           ' Failed to detach').format(volume_id,
                                                                       attach_data.instance_id,
                                                                       attach_data.device))
    else:
        old_dict['instance_id'] = instance_id
        old_dict['device'] = None
    if __opts__['test']:
        ret['comment'] = 'The volume {0} is set to be attached on {1}({2}).'.format(
            volume_id, instance_id, device)
        ret['result'] = None
        return ret
    if __salt__['boto_ec2.attach_volume'](volume_id=volume_id,
                                          instance_id=instance_id,
                                          device=device, **args):
        ret['comment'] = ' '.join([
            ret['comment'],
            'Volume {0} is attached on {1}({2}).'.format(volume_id, instance_id, device)])
        new_dict['instance_id'] = instance_id
        new_dict['device'] = device
        ret['changes'] = {'old': old_dict, 'new': new_dict}
    else:
        ret['comment'] = 'Error attaching volume {0} to instance {1}({2}).'.format(
            volume_id, instance_id, device)
        ret['result'] = False
    return ret
Ensure the EC2 volume is present and attached. .. name State definition name. volume_name The Name tag value for the volume. If no volume with that matching name tag is found, a new volume will be created. If multiple volumes are matched, the state will fail. volume_id Resource ID of the volume. Exclusive with 'volume_name'. instance_name Attach volume to instance with this Name tag. Exclusive with 'instance_id'. instance_id Attach volume to instance with this ID. Exclusive with 'instance_name'. device The device on the instance through which the volume is exposed (e.g. /dev/sdh) size The size of the new volume, in GiB. If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. snapshot_id The snapshot ID from which the new Volume will be created. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. volume_type The type of the volume. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. Valid volume types for AWS can be found here: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html iops The provisioned IOPS you want to associate with this volume. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. encrypted Specifies whether the volume should be encrypted. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. kms_key_id If encrypted is True, this KMS Key ID may be specified to encrypt volume with this key. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. e.g.: arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef region Region to connect to. key Secret key to be used. keyid Access key to be used. 
profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_ec2.py#L1312-L1509
null
# -*- coding: utf-8 -*- ''' Manage EC2 .. versionadded:: 2015.8.0 This module provides an interface to the Elastic Compute Cloud (EC2) service from AWS. The below code creates a key pair: .. code-block:: yaml create-key-pair: boto_ec2.key_present: - name: mykeypair - save_private: /root/ - region: eu-west-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs .. code-block:: yaml import-key-pair: boto_ec2.key_present: - name: mykeypair - upload_public: 'ssh-rsa AAAA' - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs You can also use salt:// in order to define the public key. .. code-block:: yaml import-key-pair: boto_ec2.key_present: - name: mykeypair - upload_public: salt://mybase/public_key.pub - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs The below code deletes a key pair: .. code-block:: yaml delete-key-pair: boto_ec2.key_absent: - name: mykeypair - region: eu-west-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging from time import time, sleep # Import salt libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin import salt.utils.data import salt.utils.dictupdate as dictupdate from salt.exceptions import SaltInvocationError, CommandExecutionError log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. ''' if 'boto_ec2.get_key' in __salt__: return 'boto_ec2' else: return False def key_present(name, save_private=None, upload_public=None, region=None, key=None, keyid=None, profile=None): ''' Ensure key pair is present. 
''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } exists = __salt__['boto_ec2.get_key'](name, region, key, keyid, profile) log.debug('exists is %s', exists) if upload_public is not None and 'salt://' in upload_public: try: upload_public = __salt__['cp.get_file_str'](upload_public) except IOError as e: log.debug(e) ret['comment'] = 'File {0} not found.'.format(upload_public) ret['result'] = False return ret if not exists: if __opts__['test']: ret['comment'] = 'The key {0} is set to be created.'.format(name) ret['result'] = None return ret if save_private and not upload_public: created = __salt__['boto_ec2.create_key']( name, save_private, region, key, keyid, profile ) if created: ret['result'] = True ret['comment'] = 'The key {0} is created.'.format(name) ret['changes']['new'] = created else: ret['result'] = False ret['comment'] = 'Could not create key {0} '.format(name) elif not save_private and upload_public: imported = __salt__['boto_ec2.import_key'](name, upload_public, region, key, keyid, profile) if imported: ret['result'] = True ret['comment'] = 'The key {0} is created.'.format(name) ret['changes']['old'] = None ret['changes']['new'] = imported else: ret['result'] = False ret['comment'] = 'Could not create key {0} '.format(name) else: ret['result'] = False ret['comment'] = 'You can either upload or download a private key ' else: ret['result'] = True ret['comment'] = 'The key name {0} already exists'.format(name) return ret def key_absent(name, region=None, key=None, keyid=None, profile=None): ''' Deletes a key pair ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } exists = __salt__['boto_ec2.get_key'](name, region, key, keyid, profile) if exists: if __opts__['test']: ret['comment'] = 'The key {0} is set to be deleted.'.format(name) ret['result'] = None return ret deleted = __salt__['boto_ec2.delete_key'](name, region, key, keyid, profile) log.debug('exists is %s', deleted) if deleted: ret['result'] = True 
ret['comment'] = 'The key {0} is deleted.'.format(name) ret['changes']['old'] = name else: ret['result'] = False ret['comment'] = 'Could not delete key {0} '.format(name) else: ret['result'] = True ret['comment'] = 'The key name {0} does not exist'.format(name) return ret def eni_present( name, subnet_id=None, subnet_name=None, private_ip_address=None, description=None, groups=None, source_dest_check=True, allocate_eip=None, arecords=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 ENI exists. .. versionadded:: 2016.3.0 name Name tag associated with the ENI. subnet_id The VPC subnet ID the ENI will exist within. subnet_name The VPC subnet name the ENI will exist within. private_ip_address The private ip address to use for this ENI. If this is not specified AWS will automatically assign a private IP address to the ENI. Must be specified at creation time; will be ignored afterward. description Description of the key. groups A list of security groups to apply to the ENI. source_dest_check Boolean specifying whether source/destination checking is enabled on the ENI. allocate_eip allocate and associate an EIP to the ENI. Could be 'standard' to allocate Elastic IP to EC2 region or 'vpc' to get it for a particular VPC .. versionchanged:: 2016.11.0 arecords A list of arecord dicts with attributes needed for the DNS add_record state. By default the boto_route53.add_record state will be used, which requires: name, zone, ttl, and identifier. See the boto_route53 state for information about these attributes. Other DNS modules can be called by specifying the provider keyword. By default, the private ENI IP address will be used, set 'public: True' in the arecord dict to use the ENI's public IP address .. versionadded:: 2016.3.0 region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' if not salt.utils.data.exactly_one((subnet_id, subnet_name)): raise SaltInvocationError('One (but not both) of subnet_id or ' 'subnet_name must be provided.') if not groups: raise SaltInvocationError('groups is a required argument.') if not isinstance(groups, list): raise SaltInvocationError('groups must be a list.') if not isinstance(source_dest_check, bool): raise SaltInvocationError('source_dest_check must be a bool.') ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} r = __salt__['boto_ec2.get_network_interface']( name=name, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in r: ret['result'] = False ret['comment'] = 'Error when attempting to find eni: {0}.'.format( r['error']['message'] ) return ret if not r['result']: if __opts__['test']: ret['comment'] = 'ENI is set to be created.' if allocate_eip: ret['comment'] = ' '.join([ret['comment'], 'An EIP is set to be allocated/assocaited to the ENI.']) if arecords: ret['comment'] = ' '.join([ret['comment'], 'A records are set to be created.']) ret['result'] = None return ret result_create = __salt__['boto_ec2.create_network_interface']( name, subnet_id=subnet_id, subnet_name=subnet_name, private_ip_address=private_ip_address, description=description, groups=groups, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_create: ret['result'] = False ret['comment'] = 'Failed to create ENI: {0}'.format( result_create['error']['message'] ) return ret r['result'] = result_create['result'] ret['comment'] = 'Created ENI {0}'.format(name) ret['changes']['id'] = r['result']['id'] else: _ret = _eni_attribute( r['result'], 'description', description, region, key, keyid, profile ) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = _ret['comment'] if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _eni_groups( r['result'], groups, region, key, keyid, profile ) ret['changes'] = 
dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret # Actions that need to occur whether creating or updating _ret = _eni_attribute( r['result'], 'source_dest_check', source_dest_check, region, key, keyid, profile ) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] return ret if allocate_eip: if 'allocationId' not in r['result']: if __opts__['test']: ret['comment'] = ' '.join([ret['comment'], 'An EIP is set to be allocated and assocaited to the ENI.']) else: domain = 'vpc' if allocate_eip == 'vpc' else None eip_alloc = __salt__['boto_ec2.allocate_eip_address'](domain=domain, region=region, key=key, keyid=keyid, profile=profile) if eip_alloc: _ret = __salt__['boto_ec2.associate_eip_address'](instance_id=None, instance_name=None, public_ip=None, allocation_id=eip_alloc['allocation_id'], network_interface_id=r['result']['id'], private_ip_address=None, allow_reassociation=False, region=region, key=key, keyid=keyid, profile=profile) if not _ret: _ret = __salt__['boto_ec2.release_eip_address'](public_ip=None, allocation_id=eip_alloc['allocation_id'], region=region, key=key, keyid=keyid, profile=profile) ret['result'] = False msg = 'Failed to assocaite the allocated EIP address with the ENI. The EIP {0}'.format('was successfully released.' if _ret else 'was NOT RELEASED.') ret['comment'] = ' '.join([ret['comment'], msg]) return ret else: ret['result'] = False ret['comment'] = ' '.join([ret['comment'], 'Failed to allocate an EIP address']) return ret else: ret['comment'] = ' '.join([ret['comment'], 'An EIP is already allocated/assocaited to the ENI']) if arecords: for arecord in arecords: if 'name' not in arecord: msg = 'The arecord must contain a "name" property.' 
raise SaltInvocationError(msg) log.debug('processing arecord %s', arecord) _ret = None dns_provider = 'boto_route53' arecord['record_type'] = 'A' public_ip_arecord = False if 'public' in arecord: public_ip_arecord = arecord.pop('public') if public_ip_arecord: if 'publicIp' in r['result']: arecord['value'] = r['result']['publicIp'] elif 'public_ip' in eip_alloc: arecord['value'] = eip_alloc['public_ip'] else: msg = 'Unable to add an A record for the public IP address, a public IP address does not seem to be allocated to this ENI.' raise CommandExecutionError(msg) else: arecord['value'] = r['result']['private_ip_address'] if 'provider' in arecord: dns_provider = arecord.pop('provider') if dns_provider == 'boto_route53': if 'profile' not in arecord: arecord['profile'] = profile if 'key' not in arecord: arecord['key'] = key if 'keyid' not in arecord: arecord['keyid'] = keyid if 'region' not in arecord: arecord['region'] = region _ret = __states__['.'.join([dns_provider, 'present'])](**arecord) log.debug('ret from dns_provider.present = %s', _ret) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret return ret def _eni_attribute(metadata, attr, value, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} if metadata[attr] == value: return ret if __opts__['test']: ret['comment'] = 'ENI set to have {0} updated.'.format(attr) ret['result'] = None return ret result_update = __salt__['boto_ec2.modify_network_interface_attribute']( network_interface_id=metadata['id'], attr=attr, value=value, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_update: msg = 'Failed to update ENI {0}: {1}.' 
ret['result'] = False ret['comment'] = msg.format(attr, result_update['error']['message']) else: ret['comment'] = 'Updated ENI {0}.'.format(attr) ret['changes'][attr] = { 'old': metadata[attr], 'new': value } return ret def _eni_groups(metadata, groups, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} group_ids = [g['id'] for g in metadata['groups']] group_ids.sort() _groups = __salt__['boto_secgroup.convert_to_group_ids']( groups, vpc_id=metadata['vpc_id'], region=region, key=key, keyid=keyid, profile=profile ) if not _groups: ret['comment'] = 'Could not find secgroup ids for provided groups.' ret['result'] = False _groups.sort() if group_ids == _groups: return ret if __opts__['test']: ret['comment'] = 'ENI set to have groups updated.' ret['result'] = None return ret result_update = __salt__['boto_ec2.modify_network_interface_attribute']( network_interface_id=metadata['id'], attr='groups', value=_groups, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_update: msg = 'Failed to update ENI groups: {1}.' ret['result'] = False ret['comment'] = msg.format(result_update['error']['message']) else: ret['comment'] = 'Updated ENI groups.' ret['changes']['groups'] = { 'old': group_ids, 'new': _groups } return ret def eni_absent( name, release_eip=False, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 ENI is absent. .. versionadded:: 2016.3.0 name Name tag associated with the ENI. release_eip True/False - release any EIP associated with the ENI region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} r = __salt__['boto_ec2.get_network_interface']( name=name, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in r: ret['result'] = False ret['comment'] = 'Error when attempting to find eni: {0}.'.format( r['error']['message'] ) return ret if not r['result']: if __opts__['test']: ret['comment'] = 'ENI is set to be deleted.' ret['result'] = None return ret else: if __opts__['test']: ret['comment'] = 'ENI is set to be deleted.' if release_eip and 'allocationId' in r['result']: ret['comment'] = ' '.join([ret['comment'], 'Allocated/associated EIP is set to be released']) ret['result'] = None return ret if 'id' in r['result']['attachment']: result_detach = __salt__['boto_ec2.detach_network_interface']( name=name, force=True, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_detach: ret['result'] = False ret['comment'] = 'Failed to detach ENI: {0}'.format( result_detach['error']['message'] ) return ret # TODO: Ensure the detach occurs before continuing result_delete = __salt__['boto_ec2.delete_network_interface']( name=name, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_delete: ret['result'] = False ret['comment'] = 'Failed to delete ENI: {0}'.format( result_delete['error']['message'] ) return ret ret['comment'] = 'Deleted ENI {0}'.format(name) ret['changes']['id'] = None if release_eip and 'allocationId' in r['result']: _ret = __salt__['boto_ec2.release_eip_address'](public_ip=None, allocation_id=r['result']['allocationId'], region=region, key=key, keyid=keyid, profile=profile) if not _ret: ret['comment'] = ' '.join([ret['comment'], 'Failed to release EIP allocated to the ENI.']) ret['result'] = False return ret else: ret['comment'] = ' '.join([ret['comment'], 'EIP released.']) ret['changes']['eip released'] = True return ret def snapshot_created(name, ami_name, instance_name, wait_until_available=True, wait_timeout_seconds=300, 
**kwargs): ''' Create a snapshot from the given instance .. versionadded:: 2016.3.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } if not __salt__['boto_ec2.create_image'](ami_name=ami_name, instance_name=instance_name, **kwargs): ret['comment'] = 'Failed to create new AMI {ami_name}'.format(ami_name=ami_name) ret['result'] = False return ret ret['comment'] = 'Created new AMI {ami_name}'.format(ami_name=ami_name) ret['changes']['new'] = {ami_name: ami_name} if not wait_until_available: return ret starttime = time() while True: images = __salt__['boto_ec2.find_images'](ami_name=ami_name, return_objs=True, **kwargs) if images and images[0].state == 'available': break if time() - starttime > wait_timeout_seconds: if images: ret['comment'] = 'AMI still in state {state} after timeout'.format(state=images[0].state) else: ret['comment'] = 'AMI with name {ami_name} not found after timeout.'.format(ami_name=ami_name) ret['result'] = False return ret sleep(5) return ret def instance_present(name, instance_name=None, instance_id=None, image_id=None, image_name=None, tags=None, key_name=None, security_groups=None, user_data=None, instance_type=None, placement=None, kernel_id=None, ramdisk_id=None, vpc_id=None, vpc_name=None, monitoring_enabled=None, subnet_id=None, subnet_name=None, private_ip_address=None, block_device_map=None, disable_api_termination=None, instance_initiated_shutdown_behavior=None, placement_group=None, client_token=None, security_group_ids=None, security_group_names=None, additional_info=None, tenancy=None, instance_profile_arn=None, instance_profile_name=None, ebs_optimized=None, network_interfaces=None, network_interface_name=None, network_interface_id=None, attributes=None, target_state=None, public_ip=None, allocation_id=None, allocate_eip=False, region=None, key=None, keyid=None, profile=None): ### TODO - implement 'target_state={running, stopped}' ''' Ensure an EC2 instance is running with the given attributes and state. 
name (string) - The name of the state definition. Recommended that this match the instance_name attribute (generally the FQDN of the instance). instance_name (string) - The name of the instance, generally its FQDN. Exclusive with 'instance_id'. instance_id (string) - The ID of the instance (if known). Exclusive with 'instance_name'. image_id (string) – The ID of the AMI image to run. image_name (string) – The name of the AMI image to run. tags (dict) - Tags to apply to the instance. key_name (string) – The name of the key pair with which to launch instances. security_groups (list of strings) – The names of the EC2 classic security groups with which to associate instances user_data (string) – The Base64-encoded MIME user data to be made available to the instance(s) in this reservation. instance_type (string) – The EC2 instance size/type. Note that only certain types are compatible with HVM based AMIs. placement (string) – The Availability Zone to launch the instance into. kernel_id (string) – The ID of the kernel with which to launch the instances. ramdisk_id (string) – The ID of the RAM disk with which to launch the instances. vpc_id (string) - The ID of a VPC to attach the instance to. vpc_name (string) - The name of a VPC to attach the instance to. monitoring_enabled (bool) – Enable detailed CloudWatch monitoring on the instance. subnet_id (string) – The ID of the subnet within which to launch the instances for VPC. subnet_name (string) – The name of the subnet within which to launch the instances for VPC. private_ip_address (string) – If you’re using VPC, you can optionally use this parameter to assign the instance a specific available IP address from the subnet (e.g., 10.0.0.25). block_device_map (boto.ec2.blockdevicemapping.BlockDeviceMapping) – A BlockDeviceMapping data structure describing the EBS volumes associated with the Image. disable_api_termination (bool) – If True, the instances will be locked and will not be able to be terminated via the API. 
instance_initiated_shutdown_behavior (string) – Specifies whether the instance stops or terminates on instance-initiated shutdown. Valid values are: - 'stop' - 'terminate' placement_group (string) – If specified, this is the name of the placement group in which the instance(s) will be launched. client_token (string) – Unique, case-sensitive identifier you provide to ensure idempotency of the request. Maximum 64 ASCII characters. security_group_ids (list of strings) – The IDs of the VPC security groups with which to associate instances. security_group_names (list of strings) – The names of the VPC security groups with which to associate instances. additional_info (string) – Specifies additional information to make available to the instance(s). tenancy (string) – The tenancy of the instance you want to launch. An instance with a tenancy of ‘dedicated’ runs on single-tenant hardware and can only be launched into a VPC. Valid values are:”default” or “dedicated”. NOTE: To use dedicated tenancy you MUST specify a VPC subnet-ID as well. instance_profile_arn (string) – The Amazon resource name (ARN) of the IAM Instance Profile (IIP) to associate with the instances. instance_profile_name (string) – The name of the IAM Instance Profile (IIP) to associate with the instances. ebs_optimized (bool) – Whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and a tuned configuration stack to provide optimal EBS I/O performance. This optimization isn’t available with all instance types. network_interfaces (boto.ec2.networkinterface.NetworkInterfaceCollection) – A NetworkInterfaceCollection data structure containing the ENI specifications for the instance. network_interface_name (string) - The name of Elastic Network Interface to attach .. versionadded:: 2016.11.0 network_interface_id (string) - The id of Elastic Network Interface to attach .. 
versionadded:: 2016.11.0 attributes (dict) - Instance attributes and value to be applied to the instance. Available options are: - instanceType - A valid instance type (m1.small) - kernel - Kernel ID (None) - ramdisk - Ramdisk ID (None) - userData - Base64 encoded String (None) - disableApiTermination - Boolean (true) - instanceInitiatedShutdownBehavior - stop|terminate - blockDeviceMapping - List of strings - ie: [‘/dev/sda=false’] - sourceDestCheck - Boolean (true) - groupSet - Set of Security Groups or IDs - ebsOptimized - Boolean (false) - sriovNetSupport - String - ie: ‘simple’ target_state (string) - The desired target state of the instance. Available options are: - running - stopped Note that this option is currently UNIMPLEMENTED. public_ip: (string) - The IP of a previously allocated EIP address, which will be attached to the instance. EC2 Classic instances ONLY - for VCP pass in an allocation_id instead. allocation_id: (string) - The ID of a previously allocated EIP address, which will be attached to the instance. VPC instances ONLY - for Classic pass in a public_ip instead. allocate_eip: (bool) - Allocate and attach an EIP on-the-fly for this instance. Note you'll want to releaase this address when terminating the instance, either manually or via the 'release_eip' flag to 'instance_absent'. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. .. 
versionadded:: 2016.3.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } _create = False running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') changed_attrs = {} if not salt.utils.data.exactly_one((image_id, image_name)): raise SaltInvocationError('Exactly one of image_id OR ' 'image_name must be provided.') if (public_ip or allocation_id or allocate_eip) and not salt.utils.data.exactly_one((public_ip, allocation_id, allocate_eip)): raise SaltInvocationError('At most one of public_ip, allocation_id OR ' 'allocate_eip may be provided.') if instance_id: exists = __salt__['boto_ec2.exists'](instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) if not exists: _create = True else: instances = __salt__['boto_ec2.find_instances'](name=instance_name if instance_name else name, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) if not instances: _create = True elif len(instances) > 1: log.debug('Multiple instances matching criteria found - cannot determine a singular instance-id') instance_id = None # No way to know, we'll just have to bail later.... 
else: instance_id = instances[0] if _create: if __opts__['test']: ret['comment'] = 'The instance {0} is set to be created.'.format(name) ret['result'] = None return ret if image_name: args = {'ami_name': image_name, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile} image_ids = __salt__['boto_ec2.find_images'](**args) if image_ids: image_id = image_ids[0] else: image_id = image_name r = __salt__['boto_ec2.run'](image_id, instance_name if instance_name else name, tags=tags, key_name=key_name, security_groups=security_groups, user_data=user_data, instance_type=instance_type, placement=placement, kernel_id=kernel_id, ramdisk_id=ramdisk_id, vpc_id=vpc_id, vpc_name=vpc_name, monitoring_enabled=monitoring_enabled, subnet_id=subnet_id, subnet_name=subnet_name, private_ip_address=private_ip_address, block_device_map=block_device_map, disable_api_termination=disable_api_termination, instance_initiated_shutdown_behavior=instance_initiated_shutdown_behavior, placement_group=placement_group, client_token=client_token, security_group_ids=security_group_ids, security_group_names=security_group_names, additional_info=additional_info, tenancy=tenancy, instance_profile_arn=instance_profile_arn, instance_profile_name=instance_profile_name, ebs_optimized=ebs_optimized, network_interfaces=network_interfaces, network_interface_name=network_interface_name, network_interface_id=network_interface_id, region=region, key=key, keyid=keyid, profile=profile) if not r or 'instance_id' not in r: ret['result'] = False ret['comment'] = 'Failed to create instance {0}.'.format(instance_name if instance_name else name) return ret instance_id = r['instance_id'] ret['changes'] = {'old': {}, 'new': {}} ret['changes']['old']['instance_id'] = None ret['changes']['new']['instance_id'] = instance_id # To avoid issues we only allocate new EIPs at instance creation. 
# This might miss situations where an instance is initially created # created without and one is added later, but the alternative is the # risk of EIPs allocated at every state run. if allocate_eip: if __opts__['test']: ret['comment'] = 'New EIP would be allocated.' ret['result'] = None return ret domain = 'vpc' if vpc_id or vpc_name else None r = __salt__['boto_ec2.allocate_eip_address']( domain=domain, region=region, key=key, keyid=keyid, profile=profile) if not r: ret['result'] = False ret['comment'] = 'Failed to allocate new EIP.' return ret allocation_id = r['allocation_id'] log.info("New EIP with address %s allocated.", r['public_ip']) else: log.info("EIP not requested.") if public_ip or allocation_id: # This can take a bit to show up, give it a chance to... tries = 10 secs = 3 for t in range(tries): r = __salt__['boto_ec2.get_eip_address_info']( addresses=public_ip, allocation_ids=allocation_id, region=region, key=key, keyid=keyid, profile=profile) if r: break else: log.info( 'Waiting up to %s secs for new EIP %s to become available', tries * secs, public_ip or allocation_id ) time.sleep(secs) if not r: ret['result'] = False ret['comment'] = 'Failed to lookup EIP {0}.'.format(public_ip or allocation_id) return ret ip = r[0]['public_ip'] if r[0].get('instance_id'): if r[0]['instance_id'] != instance_id: ret['result'] = False ret['comment'] = ('EIP {0} is already associated with instance ' '{1}.'.format(public_ip if public_ip else allocation_id, r[0]['instance_id'])) return ret else: if __opts__['test']: ret['comment'] = 'Instance {0} to be updated.'.format(name) ret['result'] = None return ret r = __salt__['boto_ec2.associate_eip_address']( instance_id=instance_id, public_ip=public_ip, allocation_id=allocation_id, region=region, key=key, keyid=keyid, profile=profile) if r: if 'new' not in ret['changes']: ret['changes']['new'] = {} ret['changes']['new']['public_ip'] = ip else: ret['result'] = False ret['comment'] = 'Failed to attach EIP to instance 
{0}.'.format( instance_name if instance_name else name) return ret if attributes: for k, v in six.iteritems(attributes): curr = __salt__['boto_ec2.get_attribute'](k, instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile) curr = {} if not isinstance(curr, dict) else curr if curr.get(k) == v: continue else: if __opts__['test']: changed_attrs[k] = 'The instance attribute {0} is set to be changed from \'{1}\' to \'{2}\'.'.format( k, curr.get(k), v) continue try: r = __salt__['boto_ec2.set_attribute'](attribute=k, attribute_value=v, instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile) except SaltInvocationError as e: ret['result'] = False ret['comment'] = 'Failed to set attribute {0} to {1} on instance {2}.'.format(k, v, instance_name) return ret ret['changes'] = ret['changes'] if ret['changes'] else {'old': {}, 'new': {}} ret['changes']['old'][k] = curr.get(k) ret['changes']['new'][k] = v if __opts__['test']: if changed_attrs: ret['changes']['new'] = changed_attrs ret['result'] = None else: ret['comment'] = 'Instance {0} is in the correct state'.format(instance_name if instance_name else name) ret['result'] = True if tags and instance_id is not None: tags = dict(tags) curr_tags = dict(__salt__['boto_ec2.get_all_tags'](filters={'resource-id': instance_id}, region=region, key=key, keyid=keyid, profile=profile).get(instance_id, {})) current = set(curr_tags.keys()) desired = set(tags.keys()) remove = list(current - desired) # Boto explicitly requires a list here and can't cope with a set... add = dict([(t, tags[t]) for t in desired - current]) replace = dict([(t, tags[t]) for t in tags if tags.get(t) != curr_tags.get(t)]) # Tag keys are unique despite the bizarre semantics uses which make it LOOK like they could be duplicative. 
add.update(replace) if add or remove: if __opts__['test']: ret['changes']['old'] = ret['changes']['old'] if 'old' in ret['changes'] else {} ret['changes']['new'] = ret['changes']['new'] if 'new' in ret['changes'] else {} ret['changes']['old']['tags'] = curr_tags ret['changes']['new']['tags'] = tags ret['comment'] += ' Tags would be updated on instance {0}.'.format(instance_name if instance_name else name) else: if remove: if not __salt__['boto_ec2.delete_tags'](resource_ids=instance_id, tags=remove, region=region, key=key, keyid=keyid, profile=profile): msg = "Error while deleting tags on instance {0}".format(instance_name if instance_name else name) log.error(msg) ret['comment'] += ' ' + msg ret['result'] = False return ret if add: if not __salt__['boto_ec2.create_tags'](resource_ids=instance_id, tags=add, region=region, key=key, keyid=keyid, profile=profile): msg = "Error while creating tags on instance {0}".format(instance_name if instance_name else name) log.error(msg) ret['comment'] += ' ' + msg ret['result'] = False return ret ret['changes']['old'] = ret['changes']['old'] if 'old' in ret['changes'] else {} ret['changes']['new'] = ret['changes']['new'] if 'new' in ret['changes'] else {} ret['changes']['old']['tags'] = curr_tags ret['changes']['new']['tags'] = tags return ret def instance_absent(name, instance_name=None, instance_id=None, release_eip=False, region=None, key=None, keyid=None, profile=None, filters=None): ''' Ensure an EC2 instance does not exist (is stopped and removed). .. versionchanged:: 2016.11.0 name (string) - The name of the state definition. instance_name (string) - The name of the instance. instance_id (string) - The ID of the instance. release_eip (bool) - Release any associated EIPs during termination. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. 
profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. filters (dict) - A dict of additional filters to use in matching the instance to delete. YAML example fragment: .. code-block:: yaml - filters: vpc-id: vpc-abcdef12 ''' ### TODO - Implement 'force' option?? Would automagically turn off ### 'disableApiTermination', as needed, before trying to delete. ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') if not instance_id: try: instance_id = __salt__['boto_ec2.get_id'](name=instance_name if instance_name else name, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states, filters=filters) except CommandExecutionError as e: ret['result'] = None ret['comment'] = ("Couldn't determine current status of instance " "{0}.".format(instance_name or name)) return ret instances = __salt__['boto_ec2.find_instances'](instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile, return_objs=True, filters=filters) if not instances: ret['result'] = True ret['comment'] = 'Instance {0} is already gone.'.format(instance_id) return ret instance = instances[0] ### Honor 'disableApiTermination' - if you want to override it, first use set_attribute() to turn it off no_can_do = __salt__['boto_ec2.get_attribute']('disableApiTermination', instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile) if no_can_do.get('disableApiTermination') is True: ret['result'] = False ret['comment'] = 'Termination of instance {0} via the API is disabled.'.format(instance_id) return ret if __opts__['test']: ret['comment'] = 'The instance {0} is set to be deleted.'.format(name) ret['result'] = None return ret r = __salt__['boto_ec2.terminate'](instance_id=instance_id, name=instance_name, region=region, key=key, keyid=keyid, profile=profile) if not r: ret['result'] = False ret['comment'] 
= 'Failed to terminate instance {0}.'.format(instance_id) return ret ret['changes']['old'] = {'instance_id': instance_id} ret['changes']['new'] = None if release_eip: ip = getattr(instance, 'ip_address', None) if ip: base_args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile} public_ip = None alloc_id = None assoc_id = None if getattr(instance, 'vpc_id', None): r = __salt__['boto_ec2.get_eip_address_info'](addresses=ip, **base_args) if r and 'allocation_id' in r[0]: alloc_id = r[0]['allocation_id'] assoc_id = r[0].get('association_id') else: # I /believe/ this situation is impossible but let's hedge our bets... ret['result'] = False ret['comment'] = "Can't determine AllocationId for address {0}.".format(ip) return ret else: public_ip = instance.ip_address if assoc_id: # Race here - sometimes the terminate above will already have dropped this if not __salt__['boto_ec2.disassociate_eip_address'](association_id=assoc_id, **base_args): log.warning("Failed to disassociate EIP %s.", ip) if __salt__['boto_ec2.release_eip_address'](allocation_id=alloc_id, public_ip=public_ip, **base_args): log.info("Released EIP address %s", public_ip or r[0]['public_ip']) ret['changes']['old']['public_ip'] = public_ip or r[0]['public_ip'] else: ret['result'] = False ret['comment'] = "Failed to release EIP {0}.".format(ip) return ret return ret def volume_absent(name, volume_name=None, volume_id=None, instance_name=None, instance_id=None, device=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 volume is detached and absent. .. versionadded:: 2016.11.0 name State definition name. volume_name Name tag associated with the volume. For safety, if this matches more than one volume, the state will refuse to apply. volume_id Resource ID of the volume. instance_name Only remove volume if it is attached to instance with this Name tag. Exclusive with 'instance_id'. Requires 'device'. instance_id Only remove volume if it is attached to this instance. 
Exclusive with 'instance_name'. Requires 'device'. device Match by device rather than ID. Requires one of 'instance_name' or 'instance_id'. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } filters = {} running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') if not salt.utils.data.exactly_one((volume_name, volume_id, instance_name, instance_id)): raise SaltInvocationError("Exactly one of 'volume_name', 'volume_id', " "'instance_name', or 'instance_id' must be provided.") if (instance_name or instance_id) and not device: raise SaltInvocationError("Parameter 'device' is required when either " "'instance_name' or 'instance_id' is specified.") if volume_id: filters.update({'volume-id': volume_id}) if volume_name: filters.update({'tag:Name': volume_name}) if instance_name: instance_id = __salt__['boto_ec2.get_id']( name=instance_name, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) if not instance_id: ret['comment'] = ('Instance with Name {0} not found. 
Assuming ' 'associated volumes gone.'.format(instance_name)) return ret if instance_id: filters.update({'attachment.instance-id': instance_id}) if device: filters.update({'attachment.device': device}) args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile} vols = __salt__['boto_ec2.get_all_volumes'](filters=filters, **args) if not vols: ret['comment'] = 'Volume matching criteria not found, assuming already absent' return ret if len(vols) > 1: msg = "More than one volume matched criteria, can't continue in state {0}".format(name) log.error(msg) ret['comment'] = msg ret['result'] = False return ret vol = vols[0] log.info('Matched Volume ID %s', vol) if __opts__['test']: ret['comment'] = 'The volume {0} is set to be deleted.'.format(vol) ret['result'] = None return ret if __salt__['boto_ec2.delete_volume'](volume_id=vol, force=True, **args): ret['comment'] = 'Volume {0} deleted.'.format(vol) ret['changes'] = {'old': {'volume_id': vol}, 'new': {'volume_id': None}} else: ret['comment'] = 'Error deleting volume {0}.'.format(vol) ret['result'] = False return ret def volumes_tagged(name, tag_maps, authoritative=False, region=None, key=None, keyid=None, profile=None): ''' Ensure EC2 volume(s) matching the given filters have the defined tags. .. versionadded:: 2016.11.0 name State definition name. tag_maps List of dicts of filters and tags, where 'filters' is a dict suitable for passing to the 'filters' argument of boto_ec2.get_all_volumes(), and 'tags' is a dict of tags to be set on volumes as matched by the given filters. The filter syntax is extended to permit passing either a list of volume_ids or an instance_name (with instance_name being the Name tag of the instance to which the desired volumes are mapped). Each mapping in the list is applied separately, so multiple sets of volumes can be all tagged differently with one call to this function. YAML example fragment: .. 
code-block:: yaml - filters: attachment.instance_id: i-abcdef12 tags: Name: dev-int-abcdef12.aws-foo.com - filters: attachment.device: /dev/sdf tags: ManagedSnapshots: true BillingGroup: bubba.hotep@aws-foo.com - filters: instance_name: prd-foo-01.aws-foo.com tags: Name: prd-foo-01.aws-foo.com BillingGroup: infra-team@aws-foo.com - filters: volume_ids: [ vol-12345689, vol-abcdef12 ] tags: BillingGroup: infra-team@aws-foo.com authoritative Should un-declared tags currently set on matched volumes be deleted? Boolean. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } args = {'tag_maps': tag_maps, 'authoritative': authoritative, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile} if __opts__['test']: args['dry_run'] = True r = __salt__['boto_ec2.set_volumes_tags'](**args) if r['success']: if r.get('changes'): ret['comment'] = 'Tags would be updated.' ret['changes'] = r['changes'] ret['result'] = None else: ret['comment'] = 'Error validating requested volume tags.' ret['result'] = False return ret r = __salt__['boto_ec2.set_volumes_tags'](**args) if r['success']: if r.get('changes'): ret['comment'] = 'Tags applied.' ret['changes'] = r['changes'] else: ret['comment'] = 'Error updating requested volume tags.' ret['result'] = False return ret def private_ips_present(name, network_interface_name=None, network_interface_id=None, private_ip_addresses=None, allow_reassignment=False, region=None, key=None, keyid=None, profile=None): ''' Ensure an ENI has secondary private ip addresses associated with it name (String) - State definition name network_interface_id (String) - The EC2 network interface id, example eni-123456789 private_ip_addresses (List or String) - The secondary private ip address(es) that should be present on the ENI. 
allow_reassignment (Boolean) - If true, will reassign a secondary private ip address associated with another ENI. If false, state will fail if the secondary private ip address is associated with another ENI. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' if not salt.utils.data.exactly_one((network_interface_name, network_interface_id)): raise SaltInvocationError("Exactly one of 'network_interface_name', " "'network_interface_id' must be provided") if not private_ip_addresses: raise SaltInvocationError("You must provide the private_ip_addresses to associate with the " "ENI") ret = { 'name': name, 'result': True, 'comment': '', 'changes': {'old': [], 'new': []} } get_eni_args = { 'name': network_interface_name, 'network_interface_id': network_interface_id, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args) # Check if there are any new secondary private ips to add to the eni if eni and eni.get('result', {}).get('private_ip_addresses'): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['old'].append(eni_pip['private_ip_address']) ips_to_add = [] for private_ip in private_ip_addresses: if private_ip not in ret['changes']['old']: ips_to_add.append(private_ip) if ips_to_add: if not __opts__['test']: # Assign secondary private ips to ENI assign_ips_args = { 'network_interface_id': network_interface_id, 'private_ip_addresses': ips_to_add, 'allow_reassignment': allow_reassignment, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } __salt__['boto_ec2.assign_private_ip_addresses'](**assign_ips_args) # Verify secondary private ips were properly assigned to ENI eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args) if eni and eni.get('result', 
{}).get('private_ip_addresses', None): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['new'].append(eni_pip['private_ip_address']) ips_not_added = [] for private_ip in private_ip_addresses: if private_ip not in ret['changes']['new']: ips_not_added.append(private_ip) # Display results if ips_not_added: ret['result'] = False ret['comment'] = ('ips on eni: {0}\n' 'attempted to add: {1}\n' 'could not add the following ips: {2}\n'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['new']), '\n\t- ' + '\n\t- '.join(ips_to_add), '\n\t- ' + '\n\t- '.join(ips_not_added))) else: ret['comment'] = "added ips: {0}".format( '\n\t- ' + '\n\t- '.join(ips_to_add)) # Verify there were changes if ret['changes']['old'] == ret['changes']['new']: ret['changes'] = {} else: # Testing mode, show that there were ips to add ret['comment'] = ('ips on eni: {0}\n' 'ips that would be added: {1}\n'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old']), '\n\t- ' + '\n\t- '.join(ips_to_add))) ret['changes'] = {} ret['result'] = None else: ret['comment'] = 'ips on eni: {0}'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old'])) # there were no changes since we did not attempt to remove ips ret['changes'] = {} return ret def private_ips_absent(name, network_interface_name=None, network_interface_id=None, private_ip_addresses=None, region=None, key=None, keyid=None, profile=None): ''' Ensure an ENI does not have secondary private ip addresses associated with it name (String) - State definition name network_interface_id (String) - The EC2 network interface id, example eni-123456789 private_ip_addresses (List or String) - The secondary private ip address(es) that should be absent on the ENI. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' if not salt.utils.data.exactly_one((network_interface_name, network_interface_id)): raise SaltInvocationError("Exactly one of 'network_interface_name', " "'network_interface_id' must be provided") if not private_ip_addresses: raise SaltInvocationError("You must provide the private_ip_addresses to unassociate with " "the ENI") if not isinstance(private_ip_addresses, list): private_ip_addresses = [private_ip_addresses] ret = { 'name': name, 'result': True, 'comment': '', 'changes': {'new': [], 'old': []} } get_eni_args = { 'name': network_interface_name, 'network_interface_id': network_interface_id, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args) # Check if there are any old private ips to remove from the eni primary_private_ip = None if eni and eni.get('result', {}).get('private_ip_addresses'): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['old'].append(eni_pip['private_ip_address']) if eni_pip['primary']: primary_private_ip = eni_pip['private_ip_address'] ips_to_remove = [] for private_ip in private_ip_addresses: if private_ip in ret['changes']['old']: ips_to_remove.append(private_ip) if private_ip == primary_private_ip: ret['result'] = False ret['comment'] = ('You cannot unassign the primary private ip address ({0}) on an ' 'eni\n' 'ips on eni: {1}\n' 'attempted to remove: {2}\n'.format( primary_private_ip, '\n\t- ' + '\n\t- '.join(ret['changes']['old']), '\n\t- ' + '\n\t- '.join(private_ip_addresses))) ret['changes'] = {} return ret if ips_to_remove: if not __opts__['test']: # Unassign secondary private ips to ENI assign_ips_args = { 'network_interface_id': network_interface_id, 'private_ip_addresses': ips_to_remove, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } __salt__['boto_ec2.unassign_private_ip_addresses'](**assign_ips_args) # Verify secondary private ips were properly unassigned from ENI eni = 
__salt__['boto_ec2.get_network_interface'](**get_eni_args) if eni and eni.get('result', {}).get('private_ip_addresses', None): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['new'].append(eni_pip['private_ip_address']) ips_not_removed = [] for private_ip in private_ip_addresses: if private_ip in ret['changes']['new']: ips_not_removed.append(private_ip) if ips_not_removed: ret['result'] = False ret['comment'] = ('ips on eni: {0}\n' 'attempted to remove: {1}\n' 'could not remove the following ips: {2}\n'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['new']), '\n\t- ' + '\n\t- '.join(ips_to_remove), '\n\t- ' + '\n\t- '.join(ips_not_removed))) else: ret['comment'] = "removed ips: {0}".format('\n\t- ' + '\n\t- '.join(ips_to_remove)) # Verify there were changes if ret['changes']['old'] == ret['changes']['new']: ret['changes'] = {} else: # Testing mode, show that there were ips to remove ret['comment'] = ('ips on eni: {0}\n' 'ips that would be removed: {1}\n'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old']), '\n\t- ' + '\n\t- '.join(ips_to_remove))) ret['changes'] = {} ret['result'] = None else: ret['comment'] = 'ips on network interface: {0}'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old'])) # there were no changes since we did not attempt to remove ips ret['changes'] = {} return ret
saltstack/salt
salt/states/boto_ec2.py
private_ips_present
python
def private_ips_present(name, network_interface_name=None, network_interface_id=None, private_ip_addresses=None, allow_reassignment=False, region=None, key=None, keyid=None, profile=None): ''' Ensure an ENI has secondary private ip addresses associated with it name (String) - State definition name network_interface_id (String) - The EC2 network interface id, example eni-123456789 private_ip_addresses (List or String) - The secondary private ip address(es) that should be present on the ENI. allow_reassignment (Boolean) - If true, will reassign a secondary private ip address associated with another ENI. If false, state will fail if the secondary private ip address is associated with another ENI. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' if not salt.utils.data.exactly_one((network_interface_name, network_interface_id)): raise SaltInvocationError("Exactly one of 'network_interface_name', " "'network_interface_id' must be provided") if not private_ip_addresses: raise SaltInvocationError("You must provide the private_ip_addresses to associate with the " "ENI") ret = { 'name': name, 'result': True, 'comment': '', 'changes': {'old': [], 'new': []} } get_eni_args = { 'name': network_interface_name, 'network_interface_id': network_interface_id, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args) # Check if there are any new secondary private ips to add to the eni if eni and eni.get('result', {}).get('private_ip_addresses'): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['old'].append(eni_pip['private_ip_address']) ips_to_add = [] for private_ip in private_ip_addresses: if private_ip not in ret['changes']['old']: ips_to_add.append(private_ip) if ips_to_add: if not 
__opts__['test']: # Assign secondary private ips to ENI assign_ips_args = { 'network_interface_id': network_interface_id, 'private_ip_addresses': ips_to_add, 'allow_reassignment': allow_reassignment, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } __salt__['boto_ec2.assign_private_ip_addresses'](**assign_ips_args) # Verify secondary private ips were properly assigned to ENI eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args) if eni and eni.get('result', {}).get('private_ip_addresses', None): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['new'].append(eni_pip['private_ip_address']) ips_not_added = [] for private_ip in private_ip_addresses: if private_ip not in ret['changes']['new']: ips_not_added.append(private_ip) # Display results if ips_not_added: ret['result'] = False ret['comment'] = ('ips on eni: {0}\n' 'attempted to add: {1}\n' 'could not add the following ips: {2}\n'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['new']), '\n\t- ' + '\n\t- '.join(ips_to_add), '\n\t- ' + '\n\t- '.join(ips_not_added))) else: ret['comment'] = "added ips: {0}".format( '\n\t- ' + '\n\t- '.join(ips_to_add)) # Verify there were changes if ret['changes']['old'] == ret['changes']['new']: ret['changes'] = {} else: # Testing mode, show that there were ips to add ret['comment'] = ('ips on eni: {0}\n' 'ips that would be added: {1}\n'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old']), '\n\t- ' + '\n\t- '.join(ips_to_add))) ret['changes'] = {} ret['result'] = None else: ret['comment'] = 'ips on eni: {0}'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old'])) # there were no changes since we did not attempt to remove ips ret['changes'] = {} return ret
Ensure an ENI has secondary private ip addresses associated with it name (String) - State definition name network_interface_id (String) - The EC2 network interface id, example eni-123456789 private_ip_addresses (List or String) - The secondary private ip address(es) that should be present on the ENI. allow_reassignment (Boolean) - If true, will reassign a secondary private ip address associated with another ENI. If false, state will fail if the secondary private ip address is associated with another ENI. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_ec2.py#L1512-L1634
null
# -*- coding: utf-8 -*- ''' Manage EC2 .. versionadded:: 2015.8.0 This module provides an interface to the Elastic Compute Cloud (EC2) service from AWS. The below code creates a key pair: .. code-block:: yaml create-key-pair: boto_ec2.key_present: - name: mykeypair - save_private: /root/ - region: eu-west-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs .. code-block:: yaml import-key-pair: boto_ec2.key_present: - name: mykeypair - upload_public: 'ssh-rsa AAAA' - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs You can also use salt:// in order to define the public key. .. code-block:: yaml import-key-pair: boto_ec2.key_present: - name: mykeypair - upload_public: salt://mybase/public_key.pub - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs The below code deletes a key pair: .. code-block:: yaml delete-key-pair: boto_ec2.key_absent: - name: mykeypair - region: eu-west-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging from time import time, sleep # Import salt libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin import salt.utils.data import salt.utils.dictupdate as dictupdate from salt.exceptions import SaltInvocationError, CommandExecutionError log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. ''' if 'boto_ec2.get_key' in __salt__: return 'boto_ec2' else: return False def key_present(name, save_private=None, upload_public=None, region=None, key=None, keyid=None, profile=None): ''' Ensure key pair is present. 
''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } exists = __salt__['boto_ec2.get_key'](name, region, key, keyid, profile) log.debug('exists is %s', exists) if upload_public is not None and 'salt://' in upload_public: try: upload_public = __salt__['cp.get_file_str'](upload_public) except IOError as e: log.debug(e) ret['comment'] = 'File {0} not found.'.format(upload_public) ret['result'] = False return ret if not exists: if __opts__['test']: ret['comment'] = 'The key {0} is set to be created.'.format(name) ret['result'] = None return ret if save_private and not upload_public: created = __salt__['boto_ec2.create_key']( name, save_private, region, key, keyid, profile ) if created: ret['result'] = True ret['comment'] = 'The key {0} is created.'.format(name) ret['changes']['new'] = created else: ret['result'] = False ret['comment'] = 'Could not create key {0} '.format(name) elif not save_private and upload_public: imported = __salt__['boto_ec2.import_key'](name, upload_public, region, key, keyid, profile) if imported: ret['result'] = True ret['comment'] = 'The key {0} is created.'.format(name) ret['changes']['old'] = None ret['changes']['new'] = imported else: ret['result'] = False ret['comment'] = 'Could not create key {0} '.format(name) else: ret['result'] = False ret['comment'] = 'You can either upload or download a private key ' else: ret['result'] = True ret['comment'] = 'The key name {0} already exists'.format(name) return ret def key_absent(name, region=None, key=None, keyid=None, profile=None): ''' Deletes a key pair ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } exists = __salt__['boto_ec2.get_key'](name, region, key, keyid, profile) if exists: if __opts__['test']: ret['comment'] = 'The key {0} is set to be deleted.'.format(name) ret['result'] = None return ret deleted = __salt__['boto_ec2.delete_key'](name, region, key, keyid, profile) log.debug('exists is %s', deleted) if deleted: ret['result'] = True 
ret['comment'] = 'The key {0} is deleted.'.format(name) ret['changes']['old'] = name else: ret['result'] = False ret['comment'] = 'Could not delete key {0} '.format(name) else: ret['result'] = True ret['comment'] = 'The key name {0} does not exist'.format(name) return ret def eni_present( name, subnet_id=None, subnet_name=None, private_ip_address=None, description=None, groups=None, source_dest_check=True, allocate_eip=None, arecords=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 ENI exists. .. versionadded:: 2016.3.0 name Name tag associated with the ENI. subnet_id The VPC subnet ID the ENI will exist within. subnet_name The VPC subnet name the ENI will exist within. private_ip_address The private ip address to use for this ENI. If this is not specified AWS will automatically assign a private IP address to the ENI. Must be specified at creation time; will be ignored afterward. description Description of the key. groups A list of security groups to apply to the ENI. source_dest_check Boolean specifying whether source/destination checking is enabled on the ENI. allocate_eip allocate and associate an EIP to the ENI. Could be 'standard' to allocate Elastic IP to EC2 region or 'vpc' to get it for a particular VPC .. versionchanged:: 2016.11.0 arecords A list of arecord dicts with attributes needed for the DNS add_record state. By default the boto_route53.add_record state will be used, which requires: name, zone, ttl, and identifier. See the boto_route53 state for information about these attributes. Other DNS modules can be called by specifying the provider keyword. By default, the private ENI IP address will be used, set 'public: True' in the arecord dict to use the ENI's public IP address .. versionadded:: 2016.3.0 region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' if not salt.utils.data.exactly_one((subnet_id, subnet_name)): raise SaltInvocationError('One (but not both) of subnet_id or ' 'subnet_name must be provided.') if not groups: raise SaltInvocationError('groups is a required argument.') if not isinstance(groups, list): raise SaltInvocationError('groups must be a list.') if not isinstance(source_dest_check, bool): raise SaltInvocationError('source_dest_check must be a bool.') ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} r = __salt__['boto_ec2.get_network_interface']( name=name, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in r: ret['result'] = False ret['comment'] = 'Error when attempting to find eni: {0}.'.format( r['error']['message'] ) return ret if not r['result']: if __opts__['test']: ret['comment'] = 'ENI is set to be created.' if allocate_eip: ret['comment'] = ' '.join([ret['comment'], 'An EIP is set to be allocated/assocaited to the ENI.']) if arecords: ret['comment'] = ' '.join([ret['comment'], 'A records are set to be created.']) ret['result'] = None return ret result_create = __salt__['boto_ec2.create_network_interface']( name, subnet_id=subnet_id, subnet_name=subnet_name, private_ip_address=private_ip_address, description=description, groups=groups, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_create: ret['result'] = False ret['comment'] = 'Failed to create ENI: {0}'.format( result_create['error']['message'] ) return ret r['result'] = result_create['result'] ret['comment'] = 'Created ENI {0}'.format(name) ret['changes']['id'] = r['result']['id'] else: _ret = _eni_attribute( r['result'], 'description', description, region, key, keyid, profile ) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = _ret['comment'] if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _eni_groups( r['result'], groups, region, key, keyid, profile ) ret['changes'] = 
dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret # Actions that need to occur whether creating or updating _ret = _eni_attribute( r['result'], 'source_dest_check', source_dest_check, region, key, keyid, profile ) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] return ret if allocate_eip: if 'allocationId' not in r['result']: if __opts__['test']: ret['comment'] = ' '.join([ret['comment'], 'An EIP is set to be allocated and assocaited to the ENI.']) else: domain = 'vpc' if allocate_eip == 'vpc' else None eip_alloc = __salt__['boto_ec2.allocate_eip_address'](domain=domain, region=region, key=key, keyid=keyid, profile=profile) if eip_alloc: _ret = __salt__['boto_ec2.associate_eip_address'](instance_id=None, instance_name=None, public_ip=None, allocation_id=eip_alloc['allocation_id'], network_interface_id=r['result']['id'], private_ip_address=None, allow_reassociation=False, region=region, key=key, keyid=keyid, profile=profile) if not _ret: _ret = __salt__['boto_ec2.release_eip_address'](public_ip=None, allocation_id=eip_alloc['allocation_id'], region=region, key=key, keyid=keyid, profile=profile) ret['result'] = False msg = 'Failed to assocaite the allocated EIP address with the ENI. The EIP {0}'.format('was successfully released.' if _ret else 'was NOT RELEASED.') ret['comment'] = ' '.join([ret['comment'], msg]) return ret else: ret['result'] = False ret['comment'] = ' '.join([ret['comment'], 'Failed to allocate an EIP address']) return ret else: ret['comment'] = ' '.join([ret['comment'], 'An EIP is already allocated/assocaited to the ENI']) if arecords: for arecord in arecords: if 'name' not in arecord: msg = 'The arecord must contain a "name" property.' 
raise SaltInvocationError(msg) log.debug('processing arecord %s', arecord) _ret = None dns_provider = 'boto_route53' arecord['record_type'] = 'A' public_ip_arecord = False if 'public' in arecord: public_ip_arecord = arecord.pop('public') if public_ip_arecord: if 'publicIp' in r['result']: arecord['value'] = r['result']['publicIp'] elif 'public_ip' in eip_alloc: arecord['value'] = eip_alloc['public_ip'] else: msg = 'Unable to add an A record for the public IP address, a public IP address does not seem to be allocated to this ENI.' raise CommandExecutionError(msg) else: arecord['value'] = r['result']['private_ip_address'] if 'provider' in arecord: dns_provider = arecord.pop('provider') if dns_provider == 'boto_route53': if 'profile' not in arecord: arecord['profile'] = profile if 'key' not in arecord: arecord['key'] = key if 'keyid' not in arecord: arecord['keyid'] = keyid if 'region' not in arecord: arecord['region'] = region _ret = __states__['.'.join([dns_provider, 'present'])](**arecord) log.debug('ret from dns_provider.present = %s', _ret) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret return ret def _eni_attribute(metadata, attr, value, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} if metadata[attr] == value: return ret if __opts__['test']: ret['comment'] = 'ENI set to have {0} updated.'.format(attr) ret['result'] = None return ret result_update = __salt__['boto_ec2.modify_network_interface_attribute']( network_interface_id=metadata['id'], attr=attr, value=value, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_update: msg = 'Failed to update ENI {0}: {1}.' 
ret['result'] = False ret['comment'] = msg.format(attr, result_update['error']['message']) else: ret['comment'] = 'Updated ENI {0}.'.format(attr) ret['changes'][attr] = { 'old': metadata[attr], 'new': value } return ret def _eni_groups(metadata, groups, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} group_ids = [g['id'] for g in metadata['groups']] group_ids.sort() _groups = __salt__['boto_secgroup.convert_to_group_ids']( groups, vpc_id=metadata['vpc_id'], region=region, key=key, keyid=keyid, profile=profile ) if not _groups: ret['comment'] = 'Could not find secgroup ids for provided groups.' ret['result'] = False _groups.sort() if group_ids == _groups: return ret if __opts__['test']: ret['comment'] = 'ENI set to have groups updated.' ret['result'] = None return ret result_update = __salt__['boto_ec2.modify_network_interface_attribute']( network_interface_id=metadata['id'], attr='groups', value=_groups, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_update: msg = 'Failed to update ENI groups: {1}.' ret['result'] = False ret['comment'] = msg.format(result_update['error']['message']) else: ret['comment'] = 'Updated ENI groups.' ret['changes']['groups'] = { 'old': group_ids, 'new': _groups } return ret def eni_absent( name, release_eip=False, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 ENI is absent. .. versionadded:: 2016.3.0 name Name tag associated with the ENI. release_eip True/False - release any EIP associated with the ENI region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} r = __salt__['boto_ec2.get_network_interface']( name=name, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in r: ret['result'] = False ret['comment'] = 'Error when attempting to find eni: {0}.'.format( r['error']['message'] ) return ret if not r['result']: if __opts__['test']: ret['comment'] = 'ENI is set to be deleted.' ret['result'] = None return ret else: if __opts__['test']: ret['comment'] = 'ENI is set to be deleted.' if release_eip and 'allocationId' in r['result']: ret['comment'] = ' '.join([ret['comment'], 'Allocated/associated EIP is set to be released']) ret['result'] = None return ret if 'id' in r['result']['attachment']: result_detach = __salt__['boto_ec2.detach_network_interface']( name=name, force=True, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_detach: ret['result'] = False ret['comment'] = 'Failed to detach ENI: {0}'.format( result_detach['error']['message'] ) return ret # TODO: Ensure the detach occurs before continuing result_delete = __salt__['boto_ec2.delete_network_interface']( name=name, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_delete: ret['result'] = False ret['comment'] = 'Failed to delete ENI: {0}'.format( result_delete['error']['message'] ) return ret ret['comment'] = 'Deleted ENI {0}'.format(name) ret['changes']['id'] = None if release_eip and 'allocationId' in r['result']: _ret = __salt__['boto_ec2.release_eip_address'](public_ip=None, allocation_id=r['result']['allocationId'], region=region, key=key, keyid=keyid, profile=profile) if not _ret: ret['comment'] = ' '.join([ret['comment'], 'Failed to release EIP allocated to the ENI.']) ret['result'] = False return ret else: ret['comment'] = ' '.join([ret['comment'], 'EIP released.']) ret['changes']['eip released'] = True return ret def snapshot_created(name, ami_name, instance_name, wait_until_available=True, wait_timeout_seconds=300, 
**kwargs): ''' Create a snapshot from the given instance .. versionadded:: 2016.3.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } if not __salt__['boto_ec2.create_image'](ami_name=ami_name, instance_name=instance_name, **kwargs): ret['comment'] = 'Failed to create new AMI {ami_name}'.format(ami_name=ami_name) ret['result'] = False return ret ret['comment'] = 'Created new AMI {ami_name}'.format(ami_name=ami_name) ret['changes']['new'] = {ami_name: ami_name} if not wait_until_available: return ret starttime = time() while True: images = __salt__['boto_ec2.find_images'](ami_name=ami_name, return_objs=True, **kwargs) if images and images[0].state == 'available': break if time() - starttime > wait_timeout_seconds: if images: ret['comment'] = 'AMI still in state {state} after timeout'.format(state=images[0].state) else: ret['comment'] = 'AMI with name {ami_name} not found after timeout.'.format(ami_name=ami_name) ret['result'] = False return ret sleep(5) return ret def instance_present(name, instance_name=None, instance_id=None, image_id=None, image_name=None, tags=None, key_name=None, security_groups=None, user_data=None, instance_type=None, placement=None, kernel_id=None, ramdisk_id=None, vpc_id=None, vpc_name=None, monitoring_enabled=None, subnet_id=None, subnet_name=None, private_ip_address=None, block_device_map=None, disable_api_termination=None, instance_initiated_shutdown_behavior=None, placement_group=None, client_token=None, security_group_ids=None, security_group_names=None, additional_info=None, tenancy=None, instance_profile_arn=None, instance_profile_name=None, ebs_optimized=None, network_interfaces=None, network_interface_name=None, network_interface_id=None, attributes=None, target_state=None, public_ip=None, allocation_id=None, allocate_eip=False, region=None, key=None, keyid=None, profile=None): ### TODO - implement 'target_state={running, stopped}' ''' Ensure an EC2 instance is running with the given attributes and state. 
name (string) - The name of the state definition. Recommended that this match the instance_name attribute (generally the FQDN of the instance). instance_name (string) - The name of the instance, generally its FQDN. Exclusive with 'instance_id'. instance_id (string) - The ID of the instance (if known). Exclusive with 'instance_name'. image_id (string) – The ID of the AMI image to run. image_name (string) – The name of the AMI image to run. tags (dict) - Tags to apply to the instance. key_name (string) – The name of the key pair with which to launch instances. security_groups (list of strings) – The names of the EC2 classic security groups with which to associate instances user_data (string) – The Base64-encoded MIME user data to be made available to the instance(s) in this reservation. instance_type (string) – The EC2 instance size/type. Note that only certain types are compatible with HVM based AMIs. placement (string) – The Availability Zone to launch the instance into. kernel_id (string) – The ID of the kernel with which to launch the instances. ramdisk_id (string) – The ID of the RAM disk with which to launch the instances. vpc_id (string) - The ID of a VPC to attach the instance to. vpc_name (string) - The name of a VPC to attach the instance to. monitoring_enabled (bool) – Enable detailed CloudWatch monitoring on the instance. subnet_id (string) – The ID of the subnet within which to launch the instances for VPC. subnet_name (string) – The name of the subnet within which to launch the instances for VPC. private_ip_address (string) – If you’re using VPC, you can optionally use this parameter to assign the instance a specific available IP address from the subnet (e.g., 10.0.0.25). block_device_map (boto.ec2.blockdevicemapping.BlockDeviceMapping) – A BlockDeviceMapping data structure describing the EBS volumes associated with the Image. disable_api_termination (bool) – If True, the instances will be locked and will not be able to be terminated via the API. 
instance_initiated_shutdown_behavior (string) – Specifies whether the instance stops or terminates on instance-initiated shutdown. Valid values are: - 'stop' - 'terminate' placement_group (string) – If specified, this is the name of the placement group in which the instance(s) will be launched. client_token (string) – Unique, case-sensitive identifier you provide to ensure idempotency of the request. Maximum 64 ASCII characters. security_group_ids (list of strings) – The IDs of the VPC security groups with which to associate instances. security_group_names (list of strings) – The names of the VPC security groups with which to associate instances. additional_info (string) – Specifies additional information to make available to the instance(s). tenancy (string) – The tenancy of the instance you want to launch. An instance with a tenancy of ‘dedicated’ runs on single-tenant hardware and can only be launched into a VPC. Valid values are:”default” or “dedicated”. NOTE: To use dedicated tenancy you MUST specify a VPC subnet-ID as well. instance_profile_arn (string) – The Amazon resource name (ARN) of the IAM Instance Profile (IIP) to associate with the instances. instance_profile_name (string) – The name of the IAM Instance Profile (IIP) to associate with the instances. ebs_optimized (bool) – Whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and a tuned configuration stack to provide optimal EBS I/O performance. This optimization isn’t available with all instance types. network_interfaces (boto.ec2.networkinterface.NetworkInterfaceCollection) – A NetworkInterfaceCollection data structure containing the ENI specifications for the instance. network_interface_name (string) - The name of Elastic Network Interface to attach .. versionadded:: 2016.11.0 network_interface_id (string) - The id of Elastic Network Interface to attach .. 
versionadded:: 2016.11.0 attributes (dict) - Instance attributes and value to be applied to the instance. Available options are: - instanceType - A valid instance type (m1.small) - kernel - Kernel ID (None) - ramdisk - Ramdisk ID (None) - userData - Base64 encoded String (None) - disableApiTermination - Boolean (true) - instanceInitiatedShutdownBehavior - stop|terminate - blockDeviceMapping - List of strings - ie: [‘/dev/sda=false’] - sourceDestCheck - Boolean (true) - groupSet - Set of Security Groups or IDs - ebsOptimized - Boolean (false) - sriovNetSupport - String - ie: ‘simple’ target_state (string) - The desired target state of the instance. Available options are: - running - stopped Note that this option is currently UNIMPLEMENTED. public_ip: (string) - The IP of a previously allocated EIP address, which will be attached to the instance. EC2 Classic instances ONLY - for VCP pass in an allocation_id instead. allocation_id: (string) - The ID of a previously allocated EIP address, which will be attached to the instance. VPC instances ONLY - for Classic pass in a public_ip instead. allocate_eip: (bool) - Allocate and attach an EIP on-the-fly for this instance. Note you'll want to releaase this address when terminating the instance, either manually or via the 'release_eip' flag to 'instance_absent'. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. .. 
versionadded:: 2016.3.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } _create = False running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') changed_attrs = {} if not salt.utils.data.exactly_one((image_id, image_name)): raise SaltInvocationError('Exactly one of image_id OR ' 'image_name must be provided.') if (public_ip or allocation_id or allocate_eip) and not salt.utils.data.exactly_one((public_ip, allocation_id, allocate_eip)): raise SaltInvocationError('At most one of public_ip, allocation_id OR ' 'allocate_eip may be provided.') if instance_id: exists = __salt__['boto_ec2.exists'](instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) if not exists: _create = True else: instances = __salt__['boto_ec2.find_instances'](name=instance_name if instance_name else name, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) if not instances: _create = True elif len(instances) > 1: log.debug('Multiple instances matching criteria found - cannot determine a singular instance-id') instance_id = None # No way to know, we'll just have to bail later.... 
else: instance_id = instances[0] if _create: if __opts__['test']: ret['comment'] = 'The instance {0} is set to be created.'.format(name) ret['result'] = None return ret if image_name: args = {'ami_name': image_name, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile} image_ids = __salt__['boto_ec2.find_images'](**args) if image_ids: image_id = image_ids[0] else: image_id = image_name r = __salt__['boto_ec2.run'](image_id, instance_name if instance_name else name, tags=tags, key_name=key_name, security_groups=security_groups, user_data=user_data, instance_type=instance_type, placement=placement, kernel_id=kernel_id, ramdisk_id=ramdisk_id, vpc_id=vpc_id, vpc_name=vpc_name, monitoring_enabled=monitoring_enabled, subnet_id=subnet_id, subnet_name=subnet_name, private_ip_address=private_ip_address, block_device_map=block_device_map, disable_api_termination=disable_api_termination, instance_initiated_shutdown_behavior=instance_initiated_shutdown_behavior, placement_group=placement_group, client_token=client_token, security_group_ids=security_group_ids, security_group_names=security_group_names, additional_info=additional_info, tenancy=tenancy, instance_profile_arn=instance_profile_arn, instance_profile_name=instance_profile_name, ebs_optimized=ebs_optimized, network_interfaces=network_interfaces, network_interface_name=network_interface_name, network_interface_id=network_interface_id, region=region, key=key, keyid=keyid, profile=profile) if not r or 'instance_id' not in r: ret['result'] = False ret['comment'] = 'Failed to create instance {0}.'.format(instance_name if instance_name else name) return ret instance_id = r['instance_id'] ret['changes'] = {'old': {}, 'new': {}} ret['changes']['old']['instance_id'] = None ret['changes']['new']['instance_id'] = instance_id # To avoid issues we only allocate new EIPs at instance creation. 
# This might miss situations where an instance is initially created # created without and one is added later, but the alternative is the # risk of EIPs allocated at every state run. if allocate_eip: if __opts__['test']: ret['comment'] = 'New EIP would be allocated.' ret['result'] = None return ret domain = 'vpc' if vpc_id or vpc_name else None r = __salt__['boto_ec2.allocate_eip_address']( domain=domain, region=region, key=key, keyid=keyid, profile=profile) if not r: ret['result'] = False ret['comment'] = 'Failed to allocate new EIP.' return ret allocation_id = r['allocation_id'] log.info("New EIP with address %s allocated.", r['public_ip']) else: log.info("EIP not requested.") if public_ip or allocation_id: # This can take a bit to show up, give it a chance to... tries = 10 secs = 3 for t in range(tries): r = __salt__['boto_ec2.get_eip_address_info']( addresses=public_ip, allocation_ids=allocation_id, region=region, key=key, keyid=keyid, profile=profile) if r: break else: log.info( 'Waiting up to %s secs for new EIP %s to become available', tries * secs, public_ip or allocation_id ) time.sleep(secs) if not r: ret['result'] = False ret['comment'] = 'Failed to lookup EIP {0}.'.format(public_ip or allocation_id) return ret ip = r[0]['public_ip'] if r[0].get('instance_id'): if r[0]['instance_id'] != instance_id: ret['result'] = False ret['comment'] = ('EIP {0} is already associated with instance ' '{1}.'.format(public_ip if public_ip else allocation_id, r[0]['instance_id'])) return ret else: if __opts__['test']: ret['comment'] = 'Instance {0} to be updated.'.format(name) ret['result'] = None return ret r = __salt__['boto_ec2.associate_eip_address']( instance_id=instance_id, public_ip=public_ip, allocation_id=allocation_id, region=region, key=key, keyid=keyid, profile=profile) if r: if 'new' not in ret['changes']: ret['changes']['new'] = {} ret['changes']['new']['public_ip'] = ip else: ret['result'] = False ret['comment'] = 'Failed to attach EIP to instance 
{0}.'.format( instance_name if instance_name else name) return ret if attributes: for k, v in six.iteritems(attributes): curr = __salt__['boto_ec2.get_attribute'](k, instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile) curr = {} if not isinstance(curr, dict) else curr if curr.get(k) == v: continue else: if __opts__['test']: changed_attrs[k] = 'The instance attribute {0} is set to be changed from \'{1}\' to \'{2}\'.'.format( k, curr.get(k), v) continue try: r = __salt__['boto_ec2.set_attribute'](attribute=k, attribute_value=v, instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile) except SaltInvocationError as e: ret['result'] = False ret['comment'] = 'Failed to set attribute {0} to {1} on instance {2}.'.format(k, v, instance_name) return ret ret['changes'] = ret['changes'] if ret['changes'] else {'old': {}, 'new': {}} ret['changes']['old'][k] = curr.get(k) ret['changes']['new'][k] = v if __opts__['test']: if changed_attrs: ret['changes']['new'] = changed_attrs ret['result'] = None else: ret['comment'] = 'Instance {0} is in the correct state'.format(instance_name if instance_name else name) ret['result'] = True if tags and instance_id is not None: tags = dict(tags) curr_tags = dict(__salt__['boto_ec2.get_all_tags'](filters={'resource-id': instance_id}, region=region, key=key, keyid=keyid, profile=profile).get(instance_id, {})) current = set(curr_tags.keys()) desired = set(tags.keys()) remove = list(current - desired) # Boto explicitly requires a list here and can't cope with a set... add = dict([(t, tags[t]) for t in desired - current]) replace = dict([(t, tags[t]) for t in tags if tags.get(t) != curr_tags.get(t)]) # Tag keys are unique despite the bizarre semantics uses which make it LOOK like they could be duplicative. 
add.update(replace) if add or remove: if __opts__['test']: ret['changes']['old'] = ret['changes']['old'] if 'old' in ret['changes'] else {} ret['changes']['new'] = ret['changes']['new'] if 'new' in ret['changes'] else {} ret['changes']['old']['tags'] = curr_tags ret['changes']['new']['tags'] = tags ret['comment'] += ' Tags would be updated on instance {0}.'.format(instance_name if instance_name else name) else: if remove: if not __salt__['boto_ec2.delete_tags'](resource_ids=instance_id, tags=remove, region=region, key=key, keyid=keyid, profile=profile): msg = "Error while deleting tags on instance {0}".format(instance_name if instance_name else name) log.error(msg) ret['comment'] += ' ' + msg ret['result'] = False return ret if add: if not __salt__['boto_ec2.create_tags'](resource_ids=instance_id, tags=add, region=region, key=key, keyid=keyid, profile=profile): msg = "Error while creating tags on instance {0}".format(instance_name if instance_name else name) log.error(msg) ret['comment'] += ' ' + msg ret['result'] = False return ret ret['changes']['old'] = ret['changes']['old'] if 'old' in ret['changes'] else {} ret['changes']['new'] = ret['changes']['new'] if 'new' in ret['changes'] else {} ret['changes']['old']['tags'] = curr_tags ret['changes']['new']['tags'] = tags return ret def instance_absent(name, instance_name=None, instance_id=None, release_eip=False, region=None, key=None, keyid=None, profile=None, filters=None): ''' Ensure an EC2 instance does not exist (is stopped and removed). .. versionchanged:: 2016.11.0 name (string) - The name of the state definition. instance_name (string) - The name of the instance. instance_id (string) - The ID of the instance. release_eip (bool) - Release any associated EIPs during termination. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. 
profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. filters (dict) - A dict of additional filters to use in matching the instance to delete. YAML example fragment: .. code-block:: yaml - filters: vpc-id: vpc-abcdef12 ''' ### TODO - Implement 'force' option?? Would automagically turn off ### 'disableApiTermination', as needed, before trying to delete. ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') if not instance_id: try: instance_id = __salt__['boto_ec2.get_id'](name=instance_name if instance_name else name, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states, filters=filters) except CommandExecutionError as e: ret['result'] = None ret['comment'] = ("Couldn't determine current status of instance " "{0}.".format(instance_name or name)) return ret instances = __salt__['boto_ec2.find_instances'](instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile, return_objs=True, filters=filters) if not instances: ret['result'] = True ret['comment'] = 'Instance {0} is already gone.'.format(instance_id) return ret instance = instances[0] ### Honor 'disableApiTermination' - if you want to override it, first use set_attribute() to turn it off no_can_do = __salt__['boto_ec2.get_attribute']('disableApiTermination', instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile) if no_can_do.get('disableApiTermination') is True: ret['result'] = False ret['comment'] = 'Termination of instance {0} via the API is disabled.'.format(instance_id) return ret if __opts__['test']: ret['comment'] = 'The instance {0} is set to be deleted.'.format(name) ret['result'] = None return ret r = __salt__['boto_ec2.terminate'](instance_id=instance_id, name=instance_name, region=region, key=key, keyid=keyid, profile=profile) if not r: ret['result'] = False ret['comment'] 
= 'Failed to terminate instance {0}.'.format(instance_id) return ret ret['changes']['old'] = {'instance_id': instance_id} ret['changes']['new'] = None if release_eip: ip = getattr(instance, 'ip_address', None) if ip: base_args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile} public_ip = None alloc_id = None assoc_id = None if getattr(instance, 'vpc_id', None): r = __salt__['boto_ec2.get_eip_address_info'](addresses=ip, **base_args) if r and 'allocation_id' in r[0]: alloc_id = r[0]['allocation_id'] assoc_id = r[0].get('association_id') else: # I /believe/ this situation is impossible but let's hedge our bets... ret['result'] = False ret['comment'] = "Can't determine AllocationId for address {0}.".format(ip) return ret else: public_ip = instance.ip_address if assoc_id: # Race here - sometimes the terminate above will already have dropped this if not __salt__['boto_ec2.disassociate_eip_address'](association_id=assoc_id, **base_args): log.warning("Failed to disassociate EIP %s.", ip) if __salt__['boto_ec2.release_eip_address'](allocation_id=alloc_id, public_ip=public_ip, **base_args): log.info("Released EIP address %s", public_ip or r[0]['public_ip']) ret['changes']['old']['public_ip'] = public_ip or r[0]['public_ip'] else: ret['result'] = False ret['comment'] = "Failed to release EIP {0}.".format(ip) return ret return ret def volume_absent(name, volume_name=None, volume_id=None, instance_name=None, instance_id=None, device=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 volume is detached and absent. .. versionadded:: 2016.11.0 name State definition name. volume_name Name tag associated with the volume. For safety, if this matches more than one volume, the state will refuse to apply. volume_id Resource ID of the volume. instance_name Only remove volume if it is attached to instance with this Name tag. Exclusive with 'instance_id'. Requires 'device'. instance_id Only remove volume if it is attached to this instance. 
Exclusive with 'instance_name'. Requires 'device'. device Match by device rather than ID. Requires one of 'instance_name' or 'instance_id'. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } filters = {} running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') if not salt.utils.data.exactly_one((volume_name, volume_id, instance_name, instance_id)): raise SaltInvocationError("Exactly one of 'volume_name', 'volume_id', " "'instance_name', or 'instance_id' must be provided.") if (instance_name or instance_id) and not device: raise SaltInvocationError("Parameter 'device' is required when either " "'instance_name' or 'instance_id' is specified.") if volume_id: filters.update({'volume-id': volume_id}) if volume_name: filters.update({'tag:Name': volume_name}) if instance_name: instance_id = __salt__['boto_ec2.get_id']( name=instance_name, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) if not instance_id: ret['comment'] = ('Instance with Name {0} not found. 
Assuming ' 'associated volumes gone.'.format(instance_name)) return ret if instance_id: filters.update({'attachment.instance-id': instance_id}) if device: filters.update({'attachment.device': device}) args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile} vols = __salt__['boto_ec2.get_all_volumes'](filters=filters, **args) if not vols: ret['comment'] = 'Volume matching criteria not found, assuming already absent' return ret if len(vols) > 1: msg = "More than one volume matched criteria, can't continue in state {0}".format(name) log.error(msg) ret['comment'] = msg ret['result'] = False return ret vol = vols[0] log.info('Matched Volume ID %s', vol) if __opts__['test']: ret['comment'] = 'The volume {0} is set to be deleted.'.format(vol) ret['result'] = None return ret if __salt__['boto_ec2.delete_volume'](volume_id=vol, force=True, **args): ret['comment'] = 'Volume {0} deleted.'.format(vol) ret['changes'] = {'old': {'volume_id': vol}, 'new': {'volume_id': None}} else: ret['comment'] = 'Error deleting volume {0}.'.format(vol) ret['result'] = False return ret def volumes_tagged(name, tag_maps, authoritative=False, region=None, key=None, keyid=None, profile=None): ''' Ensure EC2 volume(s) matching the given filters have the defined tags. .. versionadded:: 2016.11.0 name State definition name. tag_maps List of dicts of filters and tags, where 'filters' is a dict suitable for passing to the 'filters' argument of boto_ec2.get_all_volumes(), and 'tags' is a dict of tags to be set on volumes as matched by the given filters. The filter syntax is extended to permit passing either a list of volume_ids or an instance_name (with instance_name being the Name tag of the instance to which the desired volumes are mapped). Each mapping in the list is applied separately, so multiple sets of volumes can be all tagged differently with one call to this function. YAML example fragment: .. 
code-block:: yaml - filters: attachment.instance_id: i-abcdef12 tags: Name: dev-int-abcdef12.aws-foo.com - filters: attachment.device: /dev/sdf tags: ManagedSnapshots: true BillingGroup: bubba.hotep@aws-foo.com - filters: instance_name: prd-foo-01.aws-foo.com tags: Name: prd-foo-01.aws-foo.com BillingGroup: infra-team@aws-foo.com - filters: volume_ids: [ vol-12345689, vol-abcdef12 ] tags: BillingGroup: infra-team@aws-foo.com authoritative Should un-declared tags currently set on matched volumes be deleted? Boolean. region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } args = {'tag_maps': tag_maps, 'authoritative': authoritative, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile} if __opts__['test']: args['dry_run'] = True r = __salt__['boto_ec2.set_volumes_tags'](**args) if r['success']: if r.get('changes'): ret['comment'] = 'Tags would be updated.' ret['changes'] = r['changes'] ret['result'] = None else: ret['comment'] = 'Error validating requested volume tags.' ret['result'] = False return ret r = __salt__['boto_ec2.set_volumes_tags'](**args) if r['success']: if r.get('changes'): ret['comment'] = 'Tags applied.' ret['changes'] = r['changes'] else: ret['comment'] = 'Error updating requested volume tags.' ret['result'] = False return ret def volume_present(name, volume_name=None, volume_id=None, instance_name=None, instance_id=None, device=None, size=None, snapshot_id=None, volume_type=None, iops=None, encrypted=False, kms_key_id=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 volume is present and attached. .. name State definition name. volume_name The Name tag value for the volume. If no volume with that matching name tag is found, a new volume will be created. 
If multiple volumes are matched, the state will fail. volume_id Resource ID of the volume. Exclusive with 'volume_name'. instance_name Attach volume to instance with this Name tag. Exclusive with 'instance_id'. instance_id Attach volume to instance with this ID. Exclusive with 'instance_name'. device The device on the instance through which the volume is exposed (e.g. /dev/sdh) size The size of the new volume, in GiB. If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. snapshot_id The snapshot ID from which the new Volume will be created. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. volume_type The type of the volume. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. Valid volume types for AWS can be found here: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html iops The provisioned IOPS you want to associate with this volume. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. encrypted Specifies whether the volume should be encrypted. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. kms_key_id If encrypted is True, this KMS Key ID may be specified to encrypt volume with this key. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. e.g.: arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} old_dict = {} new_dict = {} running_states = ('running', 'stopped') if not salt.utils.data.exactly_one((volume_name, volume_id)): raise SaltInvocationError("Exactly one of 'volume_name', 'volume_id', " " must be provided.") if not salt.utils.data.exactly_one((instance_name, instance_id)): raise SaltInvocationError("Exactly one of 'instance_name', or 'instance_id'" " must be provided.") if device is None: raise SaltInvocationError("Parameter 'device' is required.") args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile} if instance_name: instance_id = __salt__['boto_ec2.get_id']( name=instance_name, in_states=running_states, **args) if not instance_id: raise SaltInvocationError('Instance with Name {0} not found.'.format(instance_name)) instances = __salt__['boto_ec2.find_instances'](instance_id=instance_id, return_objs=True, **args) instance = instances[0] if volume_name: filters = {} filters.update({'tag:Name': volume_name}) vols = __salt__['boto_ec2.get_all_volumes'](filters=filters, **args) if len(vols) > 1: msg = "More than one volume matched volume name {0}, can't continue in state {1}".format(volume_name, name) raise SaltInvocationError(msg) if not vols: if __opts__['test']: ret['comment'] = ('The volume with name {0} is set to be created and attached' ' on {1}({2}).'.format(volume_name, instance_id, device)) ret['result'] = None return ret _rt = __salt__['boto_ec2.create_volume'](zone_name=instance.placement, size=size, snapshot_id=snapshot_id, volume_type=volume_type, iops=iops, encrypted=encrypted, kms_key_id=kms_key_id, wait_for_creation=True, **args) if 'result' in _rt: volume_id = _rt['result'] else: raise SaltInvocationError('Error creating volume with name {0}.'.format(volume_name)) _rt = __salt__['boto_ec2.set_volumes_tags'](tag_maps=[{ 'filters': {'volume_ids': [volume_id]}, 'tags': {'Name': volume_name} }], **args) if _rt['success'] is False: raise 
SaltInvocationError('Error updating requested volume ' '{0} with name {1}. {2}'.format(volume_id, volume_name, _rt['comment'])) old_dict['volume_id'] = None new_dict['volume_id'] = volume_id else: volume_id = vols[0] vols = __salt__['boto_ec2.get_all_volumes'](volume_ids=[volume_id], return_objs=True, **args) if not vols: raise SaltInvocationError('Volume {0} do not exist'.format(volume_id)) vol = vols[0] if vol.zone != instance.placement: raise SaltInvocationError(('Volume {0} in {1} cannot attach to instance' ' {2} in {3}.').format(volume_id, vol.zone, instance_id, instance.placement)) attach_data = vol.attach_data if attach_data is not None and attach_data.instance_id is not None: if instance_id == attach_data.instance_id and device == attach_data.device: ret['comment'] = 'The volume {0} is attached on {1}({2}).'.format(volume_id, instance_id, device) return ret else: if __opts__['test']: ret['comment'] = ('The volume {0} is set to be detached' ' from {1}({2} and attached on {3}({4}).').format(attach_data.instance_id, attach_data.devic, volume_id, instance_id, device) ret['result'] = None return ret if __salt__['boto_ec2.detach_volume'](volume_id=volume_id, wait_for_detachement=True, **args): ret['comment'] = 'Volume {0} is detached from {1}({2}).'.format(volume_id, attach_data.instance_id, attach_data.device) old_dict['instance_id'] = attach_data.instance_id old_dict['device'] = attach_data.device else: raise SaltInvocationError(('The volume {0} is already attached on instance {1}({2}).' 
' Failed to detach').format(volume_id, attach_data.instance_id, attach_data.device)) else: old_dict['instance_id'] = instance_id old_dict['device'] = None if __opts__['test']: ret['comment'] = 'The volume {0} is set to be attached on {1}({2}).'.format(volume_id, instance_id, device) ret['result'] = None return ret if __salt__['boto_ec2.attach_volume'](volume_id=volume_id, instance_id=instance_id, device=device, **args): ret['comment'] = ' '.join([ ret['comment'], 'Volume {0} is attached on {1}({2}).'.format(volume_id, instance_id, device)]) new_dict['instance_id'] = instance_id new_dict['device'] = device ret['changes'] = {'old': old_dict, 'new': new_dict} else: ret['comment'] = 'Error attaching volume {0} to instance {1}({2}).'.format(volume_id, instance_id, device) ret['result'] = False return ret def private_ips_absent(name, network_interface_name=None, network_interface_id=None, private_ip_addresses=None, region=None, key=None, keyid=None, profile=None): ''' Ensure an ENI does not have secondary private ip addresses associated with it name (String) - State definition name network_interface_id (String) - The EC2 network interface id, example eni-123456789 private_ip_addresses (List or String) - The secondary private ip address(es) that should be absent on the ENI. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' if not salt.utils.data.exactly_one((network_interface_name, network_interface_id)): raise SaltInvocationError("Exactly one of 'network_interface_name', " "'network_interface_id' must be provided") if not private_ip_addresses: raise SaltInvocationError("You must provide the private_ip_addresses to unassociate with " "the ENI") if not isinstance(private_ip_addresses, list): private_ip_addresses = [private_ip_addresses] ret = { 'name': name, 'result': True, 'comment': '', 'changes': {'new': [], 'old': []} } get_eni_args = { 'name': network_interface_name, 'network_interface_id': network_interface_id, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args) # Check if there are any old private ips to remove from the eni primary_private_ip = None if eni and eni.get('result', {}).get('private_ip_addresses'): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['old'].append(eni_pip['private_ip_address']) if eni_pip['primary']: primary_private_ip = eni_pip['private_ip_address'] ips_to_remove = [] for private_ip in private_ip_addresses: if private_ip in ret['changes']['old']: ips_to_remove.append(private_ip) if private_ip == primary_private_ip: ret['result'] = False ret['comment'] = ('You cannot unassign the primary private ip address ({0}) on an ' 'eni\n' 'ips on eni: {1}\n' 'attempted to remove: {2}\n'.format( primary_private_ip, '\n\t- ' + '\n\t- '.join(ret['changes']['old']), '\n\t- ' + '\n\t- '.join(private_ip_addresses))) ret['changes'] = {} return ret if ips_to_remove: if not __opts__['test']: # Unassign secondary private ips to ENI assign_ips_args = { 'network_interface_id': network_interface_id, 'private_ip_addresses': ips_to_remove, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } __salt__['boto_ec2.unassign_private_ip_addresses'](**assign_ips_args) # Verify secondary private ips were properly unassigned from ENI eni = 
__salt__['boto_ec2.get_network_interface'](**get_eni_args) if eni and eni.get('result', {}).get('private_ip_addresses', None): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['new'].append(eni_pip['private_ip_address']) ips_not_removed = [] for private_ip in private_ip_addresses: if private_ip in ret['changes']['new']: ips_not_removed.append(private_ip) if ips_not_removed: ret['result'] = False ret['comment'] = ('ips on eni: {0}\n' 'attempted to remove: {1}\n' 'could not remove the following ips: {2}\n'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['new']), '\n\t- ' + '\n\t- '.join(ips_to_remove), '\n\t- ' + '\n\t- '.join(ips_not_removed))) else: ret['comment'] = "removed ips: {0}".format('\n\t- ' + '\n\t- '.join(ips_to_remove)) # Verify there were changes if ret['changes']['old'] == ret['changes']['new']: ret['changes'] = {} else: # Testing mode, show that there were ips to remove ret['comment'] = ('ips on eni: {0}\n' 'ips that would be removed: {1}\n'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old']), '\n\t- ' + '\n\t- '.join(ips_to_remove))) ret['changes'] = {} ret['result'] = None else: ret['comment'] = 'ips on network interface: {0}'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old'])) # there were no changes since we did not attempt to remove ips ret['changes'] = {} return ret
saltstack/salt
salt/states/boto_ec2.py
private_ips_absent
python
def private_ips_absent(
        name,
        network_interface_name=None,
        network_interface_id=None,
        private_ip_addresses=None,
        region=None,
        key=None,
        keyid=None,
        profile=None):
    '''
    Ensure an ENI does not have secondary private ip addresses associated with it

    name
        (String) - State definition name

    network_interface_name
        (String) - The Name tag of the EC2 network interface.  Exclusive with
        'network_interface_id'.

    network_interface_id
        (String) - The EC2 network interface id, example eni-123456789

    private_ip_addresses
        (List or String) - The secondary private ip address(es) that should be
        absent on the ENI.

    region
        (string) - Region to connect to.

    key
        (string) - Secret key to be used.

    keyid
        (string) - Access key to be used.

    profile
        (variable) - A dict with region, key and keyid, or a pillar key
        (string) that contains a dict with region, key and keyid.
    '''
    if not salt.utils.data.exactly_one((network_interface_name, network_interface_id)):
        raise SaltInvocationError("Exactly one of 'network_interface_name', "
                                  "'network_interface_id' must be provided")

    if not private_ip_addresses:
        raise SaltInvocationError("You must provide the private_ip_addresses to unassociate with "
                                  "the ENI")
    if not isinstance(private_ip_addresses, list):
        private_ip_addresses = [private_ip_addresses]

    ret = {
        'name': name,
        'result': True,
        'comment': '',
        'changes': {'new': [], 'old': []}
    }

    get_eni_args = {
        'name': network_interface_name,
        'network_interface_id': network_interface_id,
        'region': region,
        'key': key,
        'keyid': keyid,
        'profile': profile
    }

    eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args)

    # BUGFIX: when only 'network_interface_name' was supplied, the unassign
    # call below was made with network_interface_id=None.  Resolve the id
    # from the looked-up ENI so either identifier works.
    if not network_interface_id and eni and eni.get('result', {}).get('id'):
        network_interface_id = eni['result']['id']

    # Check if there are any old private ips to remove from the eni
    primary_private_ip = None
    if eni and eni.get('result', {}).get('private_ip_addresses'):
        for eni_pip in eni['result']['private_ip_addresses']:
            ret['changes']['old'].append(eni_pip['private_ip_address'])
            if eni_pip['primary']:
                primary_private_ip = eni_pip['private_ip_address']

    ips_to_remove = []
    for private_ip in private_ip_addresses:
        if private_ip in ret['changes']['old']:
            ips_to_remove.append(private_ip)
        if private_ip == primary_private_ip:
            # The primary private IP can never be unassigned from an ENI.
            ret['result'] = False
            ret['comment'] = ('You cannot unassign the primary private ip address ({0}) on an '
                              'eni\n'
                              'ips on eni: {1}\n'
                              'attempted to remove: {2}\n'.format(
                                  primary_private_ip,
                                  '\n\t- ' + '\n\t- '.join(ret['changes']['old']),
                                  '\n\t- ' + '\n\t- '.join(private_ip_addresses)))
            ret['changes'] = {}
            return ret

    if ips_to_remove:
        if not __opts__['test']:
            # Unassign secondary private ips to ENI
            assign_ips_args = {
                'network_interface_id': network_interface_id,
                'private_ip_addresses': ips_to_remove,
                'region': region,
                'key': key,
                'keyid': keyid,
                'profile': profile
            }
            __salt__['boto_ec2.unassign_private_ip_addresses'](**assign_ips_args)

            # Verify secondary private ips were properly unassigned from ENI
            eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args)
            if eni and eni.get('result', {}).get('private_ip_addresses', None):
                for eni_pip in eni['result']['private_ip_addresses']:
                    ret['changes']['new'].append(eni_pip['private_ip_address'])

            ips_not_removed = []
            for private_ip in private_ip_addresses:
                if private_ip in ret['changes']['new']:
                    ips_not_removed.append(private_ip)

            if ips_not_removed:
                ret['result'] = False
                ret['comment'] = ('ips on eni: {0}\n'
                                  'attempted to remove: {1}\n'
                                  'could not remove the following ips: {2}\n'.format(
                                      '\n\t- ' + '\n\t- '.join(ret['changes']['new']),
                                      '\n\t- ' + '\n\t- '.join(ips_to_remove),
                                      '\n\t- ' + '\n\t- '.join(ips_not_removed)))
            else:
                ret['comment'] = "removed ips: {0}".format('\n\t- ' + '\n\t- '.join(ips_to_remove))

            # Verify there were changes
            if ret['changes']['old'] == ret['changes']['new']:
                ret['changes'] = {}
        else:
            # Testing mode, show that there were ips to remove
            ret['comment'] = ('ips on eni: {0}\n'
                              'ips that would be removed: {1}\n'.format(
                                  '\n\t- ' + '\n\t- '.join(ret['changes']['old']),
                                  '\n\t- ' + '\n\t- '.join(ips_to_remove)))
            ret['changes'] = {}
            ret['result'] = None
    else:
        ret['comment'] = 'ips on network interface: {0}'.format(
            '\n\t- ' + '\n\t- '.join(ret['changes']['old']))

        # there were no changes since we did not attempt to remove ips
        ret['changes'] = {}

    return ret
Ensure an ENI does not have secondary private ip addresses associated with it name (String) - State definition name network_interface_id (String) - The EC2 network interface id, example eni-123456789 private_ip_addresses (List or String) - The secondary private ip address(es) that should be absent on the ENI. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/boto_ec2.py#L1637-L1767
null
# -*- coding: utf-8 -*- ''' Manage EC2 .. versionadded:: 2015.8.0 This module provides an interface to the Elastic Compute Cloud (EC2) service from AWS. The below code creates a key pair: .. code-block:: yaml create-key-pair: boto_ec2.key_present: - name: mykeypair - save_private: /root/ - region: eu-west-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs .. code-block:: yaml import-key-pair: boto_ec2.key_present: - name: mykeypair - upload_public: 'ssh-rsa AAAA' - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs You can also use salt:// in order to define the public key. .. code-block:: yaml import-key-pair: boto_ec2.key_present: - name: mykeypair - upload_public: salt://mybase/public_key.pub - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs The below code deletes a key pair: .. code-block:: yaml delete-key-pair: boto_ec2.key_absent: - name: mykeypair - region: eu-west-1 - keyid: GKTADJGHEIQSXMKKRBJ08H - key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs ''' # Import Python Libs from __future__ import absolute_import, print_function, unicode_literals import logging from time import time, sleep # Import salt libs from salt.ext import six from salt.ext.six.moves import range # pylint: disable=import-error,no-name-in-module,redefined-builtin import salt.utils.data import salt.utils.dictupdate as dictupdate from salt.exceptions import SaltInvocationError, CommandExecutionError log = logging.getLogger(__name__) def __virtual__(): ''' Only load if boto is available. ''' if 'boto_ec2.get_key' in __salt__: return 'boto_ec2' else: return False def key_present(name, save_private=None, upload_public=None, region=None, key=None, keyid=None, profile=None): ''' Ensure key pair is present. 
''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } exists = __salt__['boto_ec2.get_key'](name, region, key, keyid, profile) log.debug('exists is %s', exists) if upload_public is not None and 'salt://' in upload_public: try: upload_public = __salt__['cp.get_file_str'](upload_public) except IOError as e: log.debug(e) ret['comment'] = 'File {0} not found.'.format(upload_public) ret['result'] = False return ret if not exists: if __opts__['test']: ret['comment'] = 'The key {0} is set to be created.'.format(name) ret['result'] = None return ret if save_private and not upload_public: created = __salt__['boto_ec2.create_key']( name, save_private, region, key, keyid, profile ) if created: ret['result'] = True ret['comment'] = 'The key {0} is created.'.format(name) ret['changes']['new'] = created else: ret['result'] = False ret['comment'] = 'Could not create key {0} '.format(name) elif not save_private and upload_public: imported = __salt__['boto_ec2.import_key'](name, upload_public, region, key, keyid, profile) if imported: ret['result'] = True ret['comment'] = 'The key {0} is created.'.format(name) ret['changes']['old'] = None ret['changes']['new'] = imported else: ret['result'] = False ret['comment'] = 'Could not create key {0} '.format(name) else: ret['result'] = False ret['comment'] = 'You can either upload or download a private key ' else: ret['result'] = True ret['comment'] = 'The key name {0} already exists'.format(name) return ret def key_absent(name, region=None, key=None, keyid=None, profile=None): ''' Deletes a key pair ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } exists = __salt__['boto_ec2.get_key'](name, region, key, keyid, profile) if exists: if __opts__['test']: ret['comment'] = 'The key {0} is set to be deleted.'.format(name) ret['result'] = None return ret deleted = __salt__['boto_ec2.delete_key'](name, region, key, keyid, profile) log.debug('exists is %s', deleted) if deleted: ret['result'] = True 
ret['comment'] = 'The key {0} is deleted.'.format(name) ret['changes']['old'] = name else: ret['result'] = False ret['comment'] = 'Could not delete key {0} '.format(name) else: ret['result'] = True ret['comment'] = 'The key name {0} does not exist'.format(name) return ret def eni_present( name, subnet_id=None, subnet_name=None, private_ip_address=None, description=None, groups=None, source_dest_check=True, allocate_eip=None, arecords=None, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 ENI exists. .. versionadded:: 2016.3.0 name Name tag associated with the ENI. subnet_id The VPC subnet ID the ENI will exist within. subnet_name The VPC subnet name the ENI will exist within. private_ip_address The private ip address to use for this ENI. If this is not specified AWS will automatically assign a private IP address to the ENI. Must be specified at creation time; will be ignored afterward. description Description of the key. groups A list of security groups to apply to the ENI. source_dest_check Boolean specifying whether source/destination checking is enabled on the ENI. allocate_eip allocate and associate an EIP to the ENI. Could be 'standard' to allocate Elastic IP to EC2 region or 'vpc' to get it for a particular VPC .. versionchanged:: 2016.11.0 arecords A list of arecord dicts with attributes needed for the DNS add_record state. By default the boto_route53.add_record state will be used, which requires: name, zone, ttl, and identifier. See the boto_route53 state for information about these attributes. Other DNS modules can be called by specifying the provider keyword. By default, the private ENI IP address will be used, set 'public: True' in the arecord dict to use the ENI's public IP address .. versionadded:: 2016.3.0 region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' if not salt.utils.data.exactly_one((subnet_id, subnet_name)): raise SaltInvocationError('One (but not both) of subnet_id or ' 'subnet_name must be provided.') if not groups: raise SaltInvocationError('groups is a required argument.') if not isinstance(groups, list): raise SaltInvocationError('groups must be a list.') if not isinstance(source_dest_check, bool): raise SaltInvocationError('source_dest_check must be a bool.') ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} r = __salt__['boto_ec2.get_network_interface']( name=name, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in r: ret['result'] = False ret['comment'] = 'Error when attempting to find eni: {0}.'.format( r['error']['message'] ) return ret if not r['result']: if __opts__['test']: ret['comment'] = 'ENI is set to be created.' if allocate_eip: ret['comment'] = ' '.join([ret['comment'], 'An EIP is set to be allocated/assocaited to the ENI.']) if arecords: ret['comment'] = ' '.join([ret['comment'], 'A records are set to be created.']) ret['result'] = None return ret result_create = __salt__['boto_ec2.create_network_interface']( name, subnet_id=subnet_id, subnet_name=subnet_name, private_ip_address=private_ip_address, description=description, groups=groups, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_create: ret['result'] = False ret['comment'] = 'Failed to create ENI: {0}'.format( result_create['error']['message'] ) return ret r['result'] = result_create['result'] ret['comment'] = 'Created ENI {0}'.format(name) ret['changes']['id'] = r['result']['id'] else: _ret = _eni_attribute( r['result'], 'description', description, region, key, keyid, profile ) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = _ret['comment'] if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret _ret = _eni_groups( r['result'], groups, region, key, keyid, profile ) ret['changes'] = 
dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret # Actions that need to occur whether creating or updating _ret = _eni_attribute( r['result'], 'source_dest_check', source_dest_check, region, key, keyid, profile ) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] return ret if allocate_eip: if 'allocationId' not in r['result']: if __opts__['test']: ret['comment'] = ' '.join([ret['comment'], 'An EIP is set to be allocated and assocaited to the ENI.']) else: domain = 'vpc' if allocate_eip == 'vpc' else None eip_alloc = __salt__['boto_ec2.allocate_eip_address'](domain=domain, region=region, key=key, keyid=keyid, profile=profile) if eip_alloc: _ret = __salt__['boto_ec2.associate_eip_address'](instance_id=None, instance_name=None, public_ip=None, allocation_id=eip_alloc['allocation_id'], network_interface_id=r['result']['id'], private_ip_address=None, allow_reassociation=False, region=region, key=key, keyid=keyid, profile=profile) if not _ret: _ret = __salt__['boto_ec2.release_eip_address'](public_ip=None, allocation_id=eip_alloc['allocation_id'], region=region, key=key, keyid=keyid, profile=profile) ret['result'] = False msg = 'Failed to assocaite the allocated EIP address with the ENI. The EIP {0}'.format('was successfully released.' if _ret else 'was NOT RELEASED.') ret['comment'] = ' '.join([ret['comment'], msg]) return ret else: ret['result'] = False ret['comment'] = ' '.join([ret['comment'], 'Failed to allocate an EIP address']) return ret else: ret['comment'] = ' '.join([ret['comment'], 'An EIP is already allocated/assocaited to the ENI']) if arecords: for arecord in arecords: if 'name' not in arecord: msg = 'The arecord must contain a "name" property.' 
raise SaltInvocationError(msg) log.debug('processing arecord %s', arecord) _ret = None dns_provider = 'boto_route53' arecord['record_type'] = 'A' public_ip_arecord = False if 'public' in arecord: public_ip_arecord = arecord.pop('public') if public_ip_arecord: if 'publicIp' in r['result']: arecord['value'] = r['result']['publicIp'] elif 'public_ip' in eip_alloc: arecord['value'] = eip_alloc['public_ip'] else: msg = 'Unable to add an A record for the public IP address, a public IP address does not seem to be allocated to this ENI.' raise CommandExecutionError(msg) else: arecord['value'] = r['result']['private_ip_address'] if 'provider' in arecord: dns_provider = arecord.pop('provider') if dns_provider == 'boto_route53': if 'profile' not in arecord: arecord['profile'] = profile if 'key' not in arecord: arecord['key'] = key if 'keyid' not in arecord: arecord['keyid'] = keyid if 'region' not in arecord: arecord['region'] = region _ret = __states__['.'.join([dns_provider, 'present'])](**arecord) log.debug('ret from dns_provider.present = %s', _ret) ret['changes'] = dictupdate.update(ret['changes'], _ret['changes']) ret['comment'] = ' '.join([ret['comment'], _ret['comment']]) if not _ret['result']: ret['result'] = _ret['result'] if ret['result'] is False: return ret return ret def _eni_attribute(metadata, attr, value, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} if metadata[attr] == value: return ret if __opts__['test']: ret['comment'] = 'ENI set to have {0} updated.'.format(attr) ret['result'] = None return ret result_update = __salt__['boto_ec2.modify_network_interface_attribute']( network_interface_id=metadata['id'], attr=attr, value=value, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_update: msg = 'Failed to update ENI {0}: {1}.' 
ret['result'] = False ret['comment'] = msg.format(attr, result_update['error']['message']) else: ret['comment'] = 'Updated ENI {0}.'.format(attr) ret['changes'][attr] = { 'old': metadata[attr], 'new': value } return ret def _eni_groups(metadata, groups, region, key, keyid, profile): ret = {'result': True, 'comment': '', 'changes': {}} group_ids = [g['id'] for g in metadata['groups']] group_ids.sort() _groups = __salt__['boto_secgroup.convert_to_group_ids']( groups, vpc_id=metadata['vpc_id'], region=region, key=key, keyid=keyid, profile=profile ) if not _groups: ret['comment'] = 'Could not find secgroup ids for provided groups.' ret['result'] = False _groups.sort() if group_ids == _groups: return ret if __opts__['test']: ret['comment'] = 'ENI set to have groups updated.' ret['result'] = None return ret result_update = __salt__['boto_ec2.modify_network_interface_attribute']( network_interface_id=metadata['id'], attr='groups', value=_groups, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_update: msg = 'Failed to update ENI groups: {1}.' ret['result'] = False ret['comment'] = msg.format(result_update['error']['message']) else: ret['comment'] = 'Updated ENI groups.' ret['changes']['groups'] = { 'old': group_ids, 'new': _groups } return ret def eni_absent( name, release_eip=False, region=None, key=None, keyid=None, profile=None): ''' Ensure the EC2 ENI is absent. .. versionadded:: 2016.3.0 name Name tag associated with the ENI. release_eip True/False - release any EIP associated with the ENI region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} r = __salt__['boto_ec2.get_network_interface']( name=name, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in r: ret['result'] = False ret['comment'] = 'Error when attempting to find eni: {0}.'.format( r['error']['message'] ) return ret if not r['result']: if __opts__['test']: ret['comment'] = 'ENI is set to be deleted.' ret['result'] = None return ret else: if __opts__['test']: ret['comment'] = 'ENI is set to be deleted.' if release_eip and 'allocationId' in r['result']: ret['comment'] = ' '.join([ret['comment'], 'Allocated/associated EIP is set to be released']) ret['result'] = None return ret if 'id' in r['result']['attachment']: result_detach = __salt__['boto_ec2.detach_network_interface']( name=name, force=True, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_detach: ret['result'] = False ret['comment'] = 'Failed to detach ENI: {0}'.format( result_detach['error']['message'] ) return ret # TODO: Ensure the detach occurs before continuing result_delete = __salt__['boto_ec2.delete_network_interface']( name=name, region=region, key=key, keyid=keyid, profile=profile ) if 'error' in result_delete: ret['result'] = False ret['comment'] = 'Failed to delete ENI: {0}'.format( result_delete['error']['message'] ) return ret ret['comment'] = 'Deleted ENI {0}'.format(name) ret['changes']['id'] = None if release_eip and 'allocationId' in r['result']: _ret = __salt__['boto_ec2.release_eip_address'](public_ip=None, allocation_id=r['result']['allocationId'], region=region, key=key, keyid=keyid, profile=profile) if not _ret: ret['comment'] = ' '.join([ret['comment'], 'Failed to release EIP allocated to the ENI.']) ret['result'] = False return ret else: ret['comment'] = ' '.join([ret['comment'], 'EIP released.']) ret['changes']['eip released'] = True return ret def snapshot_created(name, ami_name, instance_name, wait_until_available=True, wait_timeout_seconds=300, 
**kwargs): ''' Create a snapshot from the given instance .. versionadded:: 2016.3.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } if not __salt__['boto_ec2.create_image'](ami_name=ami_name, instance_name=instance_name, **kwargs): ret['comment'] = 'Failed to create new AMI {ami_name}'.format(ami_name=ami_name) ret['result'] = False return ret ret['comment'] = 'Created new AMI {ami_name}'.format(ami_name=ami_name) ret['changes']['new'] = {ami_name: ami_name} if not wait_until_available: return ret starttime = time() while True: images = __salt__['boto_ec2.find_images'](ami_name=ami_name, return_objs=True, **kwargs) if images and images[0].state == 'available': break if time() - starttime > wait_timeout_seconds: if images: ret['comment'] = 'AMI still in state {state} after timeout'.format(state=images[0].state) else: ret['comment'] = 'AMI with name {ami_name} not found after timeout.'.format(ami_name=ami_name) ret['result'] = False return ret sleep(5) return ret def instance_present(name, instance_name=None, instance_id=None, image_id=None, image_name=None, tags=None, key_name=None, security_groups=None, user_data=None, instance_type=None, placement=None, kernel_id=None, ramdisk_id=None, vpc_id=None, vpc_name=None, monitoring_enabled=None, subnet_id=None, subnet_name=None, private_ip_address=None, block_device_map=None, disable_api_termination=None, instance_initiated_shutdown_behavior=None, placement_group=None, client_token=None, security_group_ids=None, security_group_names=None, additional_info=None, tenancy=None, instance_profile_arn=None, instance_profile_name=None, ebs_optimized=None, network_interfaces=None, network_interface_name=None, network_interface_id=None, attributes=None, target_state=None, public_ip=None, allocation_id=None, allocate_eip=False, region=None, key=None, keyid=None, profile=None): ### TODO - implement 'target_state={running, stopped}' ''' Ensure an EC2 instance is running with the given attributes and state. 
name (string) - The name of the state definition. Recommended that this match the instance_name attribute (generally the FQDN of the instance). instance_name (string) - The name of the instance, generally its FQDN. Exclusive with 'instance_id'. instance_id (string) - The ID of the instance (if known). Exclusive with 'instance_name'. image_id (string) – The ID of the AMI image to run. image_name (string) – The name of the AMI image to run. tags (dict) - Tags to apply to the instance. key_name (string) – The name of the key pair with which to launch instances. security_groups (list of strings) – The names of the EC2 classic security groups with which to associate instances user_data (string) – The Base64-encoded MIME user data to be made available to the instance(s) in this reservation. instance_type (string) – The EC2 instance size/type. Note that only certain types are compatible with HVM based AMIs. placement (string) – The Availability Zone to launch the instance into. kernel_id (string) – The ID of the kernel with which to launch the instances. ramdisk_id (string) – The ID of the RAM disk with which to launch the instances. vpc_id (string) - The ID of a VPC to attach the instance to. vpc_name (string) - The name of a VPC to attach the instance to. monitoring_enabled (bool) – Enable detailed CloudWatch monitoring on the instance. subnet_id (string) – The ID of the subnet within which to launch the instances for VPC. subnet_name (string) – The name of the subnet within which to launch the instances for VPC. private_ip_address (string) – If you’re using VPC, you can optionally use this parameter to assign the instance a specific available IP address from the subnet (e.g., 10.0.0.25). block_device_map (boto.ec2.blockdevicemapping.BlockDeviceMapping) – A BlockDeviceMapping data structure describing the EBS volumes associated with the Image. disable_api_termination (bool) – If True, the instances will be locked and will not be able to be terminated via the API. 
instance_initiated_shutdown_behavior (string) – Specifies whether the instance stops or terminates on instance-initiated shutdown. Valid values are: - 'stop' - 'terminate' placement_group (string) – If specified, this is the name of the placement group in which the instance(s) will be launched. client_token (string) – Unique, case-sensitive identifier you provide to ensure idempotency of the request. Maximum 64 ASCII characters. security_group_ids (list of strings) – The IDs of the VPC security groups with which to associate instances. security_group_names (list of strings) – The names of the VPC security groups with which to associate instances. additional_info (string) – Specifies additional information to make available to the instance(s). tenancy (string) – The tenancy of the instance you want to launch. An instance with a tenancy of ‘dedicated’ runs on single-tenant hardware and can only be launched into a VPC. Valid values are:”default” or “dedicated”. NOTE: To use dedicated tenancy you MUST specify a VPC subnet-ID as well. instance_profile_arn (string) – The Amazon resource name (ARN) of the IAM Instance Profile (IIP) to associate with the instances. instance_profile_name (string) – The name of the IAM Instance Profile (IIP) to associate with the instances. ebs_optimized (bool) – Whether the instance is optimized for EBS I/O. This optimization provides dedicated throughput to Amazon EBS and a tuned configuration stack to provide optimal EBS I/O performance. This optimization isn’t available with all instance types. network_interfaces (boto.ec2.networkinterface.NetworkInterfaceCollection) – A NetworkInterfaceCollection data structure containing the ENI specifications for the instance. network_interface_name (string) - The name of Elastic Network Interface to attach .. versionadded:: 2016.11.0 network_interface_id (string) - The id of Elastic Network Interface to attach .. 
versionadded:: 2016.11.0 attributes (dict) - Instance attributes and value to be applied to the instance. Available options are: - instanceType - A valid instance type (m1.small) - kernel - Kernel ID (None) - ramdisk - Ramdisk ID (None) - userData - Base64 encoded String (None) - disableApiTermination - Boolean (true) - instanceInitiatedShutdownBehavior - stop|terminate - blockDeviceMapping - List of strings - ie: [‘/dev/sda=false’] - sourceDestCheck - Boolean (true) - groupSet - Set of Security Groups or IDs - ebsOptimized - Boolean (false) - sriovNetSupport - String - ie: ‘simple’ target_state (string) - The desired target state of the instance. Available options are: - running - stopped Note that this option is currently UNIMPLEMENTED. public_ip: (string) - The IP of a previously allocated EIP address, which will be attached to the instance. EC2 Classic instances ONLY - for VCP pass in an allocation_id instead. allocation_id: (string) - The ID of a previously allocated EIP address, which will be attached to the instance. VPC instances ONLY - for Classic pass in a public_ip instead. allocate_eip: (bool) - Allocate and attach an EIP on-the-fly for this instance. Note you'll want to releaase this address when terminating the instance, either manually or via the 'release_eip' flag to 'instance_absent'. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. .. 
versionadded:: 2016.3.0 ''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {} } _create = False running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped') changed_attrs = {} if not salt.utils.data.exactly_one((image_id, image_name)): raise SaltInvocationError('Exactly one of image_id OR ' 'image_name must be provided.') if (public_ip or allocation_id or allocate_eip) and not salt.utils.data.exactly_one((public_ip, allocation_id, allocate_eip)): raise SaltInvocationError('At most one of public_ip, allocation_id OR ' 'allocate_eip may be provided.') if instance_id: exists = __salt__['boto_ec2.exists'](instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) if not exists: _create = True else: instances = __salt__['boto_ec2.find_instances'](name=instance_name if instance_name else name, region=region, key=key, keyid=keyid, profile=profile, in_states=running_states) if not instances: _create = True elif len(instances) > 1: log.debug('Multiple instances matching criteria found - cannot determine a singular instance-id') instance_id = None # No way to know, we'll just have to bail later.... 
else: instance_id = instances[0] if _create: if __opts__['test']: ret['comment'] = 'The instance {0} is set to be created.'.format(name) ret['result'] = None return ret if image_name: args = {'ami_name': image_name, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile} image_ids = __salt__['boto_ec2.find_images'](**args) if image_ids: image_id = image_ids[0] else: image_id = image_name r = __salt__['boto_ec2.run'](image_id, instance_name if instance_name else name, tags=tags, key_name=key_name, security_groups=security_groups, user_data=user_data, instance_type=instance_type, placement=placement, kernel_id=kernel_id, ramdisk_id=ramdisk_id, vpc_id=vpc_id, vpc_name=vpc_name, monitoring_enabled=monitoring_enabled, subnet_id=subnet_id, subnet_name=subnet_name, private_ip_address=private_ip_address, block_device_map=block_device_map, disable_api_termination=disable_api_termination, instance_initiated_shutdown_behavior=instance_initiated_shutdown_behavior, placement_group=placement_group, client_token=client_token, security_group_ids=security_group_ids, security_group_names=security_group_names, additional_info=additional_info, tenancy=tenancy, instance_profile_arn=instance_profile_arn, instance_profile_name=instance_profile_name, ebs_optimized=ebs_optimized, network_interfaces=network_interfaces, network_interface_name=network_interface_name, network_interface_id=network_interface_id, region=region, key=key, keyid=keyid, profile=profile) if not r or 'instance_id' not in r: ret['result'] = False ret['comment'] = 'Failed to create instance {0}.'.format(instance_name if instance_name else name) return ret instance_id = r['instance_id'] ret['changes'] = {'old': {}, 'new': {}} ret['changes']['old']['instance_id'] = None ret['changes']['new']['instance_id'] = instance_id # To avoid issues we only allocate new EIPs at instance creation. 
# This might miss situations where an instance is initially created # created without and one is added later, but the alternative is the # risk of EIPs allocated at every state run. if allocate_eip: if __opts__['test']: ret['comment'] = 'New EIP would be allocated.' ret['result'] = None return ret domain = 'vpc' if vpc_id or vpc_name else None r = __salt__['boto_ec2.allocate_eip_address']( domain=domain, region=region, key=key, keyid=keyid, profile=profile) if not r: ret['result'] = False ret['comment'] = 'Failed to allocate new EIP.' return ret allocation_id = r['allocation_id'] log.info("New EIP with address %s allocated.", r['public_ip']) else: log.info("EIP not requested.") if public_ip or allocation_id: # This can take a bit to show up, give it a chance to... tries = 10 secs = 3 for t in range(tries): r = __salt__['boto_ec2.get_eip_address_info']( addresses=public_ip, allocation_ids=allocation_id, region=region, key=key, keyid=keyid, profile=profile) if r: break else: log.info( 'Waiting up to %s secs for new EIP %s to become available', tries * secs, public_ip or allocation_id ) time.sleep(secs) if not r: ret['result'] = False ret['comment'] = 'Failed to lookup EIP {0}.'.format(public_ip or allocation_id) return ret ip = r[0]['public_ip'] if r[0].get('instance_id'): if r[0]['instance_id'] != instance_id: ret['result'] = False ret['comment'] = ('EIP {0} is already associated with instance ' '{1}.'.format(public_ip if public_ip else allocation_id, r[0]['instance_id'])) return ret else: if __opts__['test']: ret['comment'] = 'Instance {0} to be updated.'.format(name) ret['result'] = None return ret r = __salt__['boto_ec2.associate_eip_address']( instance_id=instance_id, public_ip=public_ip, allocation_id=allocation_id, region=region, key=key, keyid=keyid, profile=profile) if r: if 'new' not in ret['changes']: ret['changes']['new'] = {} ret['changes']['new']['public_ip'] = ip else: ret['result'] = False ret['comment'] = 'Failed to attach EIP to instance 
{0}.'.format( instance_name if instance_name else name) return ret if attributes: for k, v in six.iteritems(attributes): curr = __salt__['boto_ec2.get_attribute'](k, instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile) curr = {} if not isinstance(curr, dict) else curr if curr.get(k) == v: continue else: if __opts__['test']: changed_attrs[k] = 'The instance attribute {0} is set to be changed from \'{1}\' to \'{2}\'.'.format( k, curr.get(k), v) continue try: r = __salt__['boto_ec2.set_attribute'](attribute=k, attribute_value=v, instance_id=instance_id, region=region, key=key, keyid=keyid, profile=profile) except SaltInvocationError as e: ret['result'] = False ret['comment'] = 'Failed to set attribute {0} to {1} on instance {2}.'.format(k, v, instance_name) return ret ret['changes'] = ret['changes'] if ret['changes'] else {'old': {}, 'new': {}} ret['changes']['old'][k] = curr.get(k) ret['changes']['new'][k] = v if __opts__['test']: if changed_attrs: ret['changes']['new'] = changed_attrs ret['result'] = None else: ret['comment'] = 'Instance {0} is in the correct state'.format(instance_name if instance_name else name) ret['result'] = True if tags and instance_id is not None: tags = dict(tags) curr_tags = dict(__salt__['boto_ec2.get_all_tags'](filters={'resource-id': instance_id}, region=region, key=key, keyid=keyid, profile=profile).get(instance_id, {})) current = set(curr_tags.keys()) desired = set(tags.keys()) remove = list(current - desired) # Boto explicitly requires a list here and can't cope with a set... add = dict([(t, tags[t]) for t in desired - current]) replace = dict([(t, tags[t]) for t in tags if tags.get(t) != curr_tags.get(t)]) # Tag keys are unique despite the bizarre semantics uses which make it LOOK like they could be duplicative. 
        # Merge keys-to-replace into keys-to-add: create_tags overwrites
        # existing tags, so one call covers both cases.
        add.update(replace)
        if add or remove:
            if __opts__['test']:
                ret['changes']['old'] = ret['changes']['old'] if 'old' in ret['changes'] else {}
                ret['changes']['new'] = ret['changes']['new'] if 'new' in ret['changes'] else {}
                ret['changes']['old']['tags'] = curr_tags
                ret['changes']['new']['tags'] = tags
                ret['comment'] += ' Tags would be updated on instance {0}.'.format(
                    instance_name if instance_name else name)
            else:
                if remove:
                    if not __salt__['boto_ec2.delete_tags'](resource_ids=instance_id,
                                                            tags=remove, region=region,
                                                            key=key, keyid=keyid,
                                                            profile=profile):
                        msg = "Error while deleting tags on instance {0}".format(
                            instance_name if instance_name else name)
                        log.error(msg)
                        ret['comment'] += ' ' + msg
                        ret['result'] = False
                        return ret
                if add:
                    if not __salt__['boto_ec2.create_tags'](resource_ids=instance_id,
                                                            tags=add, region=region,
                                                            key=key, keyid=keyid,
                                                            profile=profile):
                        msg = "Error while creating tags on instance {0}".format(
                            instance_name if instance_name else name)
                        log.error(msg)
                        ret['comment'] += ' ' + msg
                        ret['result'] = False
                        return ret
                ret['changes']['old'] = ret['changes']['old'] if 'old' in ret['changes'] else {}
                ret['changes']['new'] = ret['changes']['new'] if 'new' in ret['changes'] else {}
                ret['changes']['old']['tags'] = curr_tags
                ret['changes']['new']['tags'] = tags
    return ret


def instance_absent(name, instance_name=None, instance_id=None,
                    release_eip=False, region=None, key=None, keyid=None,
                    profile=None, filters=None):
    '''
    Ensure an EC2 instance does not exist (is stopped and removed).

    .. versionchanged:: 2016.11.0

    name
        (string) - The name of the state definition.
    instance_name
        (string) - The name of the instance.
    instance_id
        (string) - The ID of the instance.
    release_eip
        (bool) - Release any associated EIPs during termination.
    region
        (string) - Region to connect to.
    key
        (string) - Secret key to be used.
    keyid
        (string) - Access key to be used.
    profile
        (variable) - A dict with region, key and keyid, or a pillar key
        (string) that contains a dict with region, key and keyid.
    filters
        (dict) - A dict of additional filters to use in matching the
        instance to delete.

    YAML example fragment:

    .. code-block:: yaml

        - filters:
            vpc-id: vpc-abcdef12
    '''
    ### TODO - Implement 'force' option??  Would automagically turn off
    ### 'disableApiTermination', as needed, before trying to delete.
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}
           }
    running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped')

    if not instance_id:
        try:
            # Resolve Name tag -> instance id; only instances in a running-ish
            # state are considered.
            instance_id = __salt__['boto_ec2.get_id'](name=instance_name if instance_name else name,
                                                      region=region, key=key, keyid=keyid,
                                                      profile=profile, in_states=running_states,
                                                      filters=filters)
        except CommandExecutionError as e:
            # Lookup failed (e.g. multiple matches) - report unknown state
            # rather than guessing.
            ret['result'] = None
            ret['comment'] = ("Couldn't determine current status of instance "
                              "{0}.".format(instance_name or name))
            return ret

    instances = __salt__['boto_ec2.find_instances'](instance_id=instance_id, region=region,
                                                    key=key, keyid=keyid, profile=profile,
                                                    return_objs=True, filters=filters)
    if not instances:
        ret['result'] = True
        ret['comment'] = 'Instance {0} is already gone.'.format(instance_id)
        return ret
    instance = instances[0]

    ### Honor 'disableApiTermination' - if you want to override it, first use set_attribute() to turn it off
    no_can_do = __salt__['boto_ec2.get_attribute']('disableApiTermination',
                                                   instance_id=instance_id, region=region,
                                                   key=key, keyid=keyid, profile=profile)
    if no_can_do.get('disableApiTermination') is True:
        ret['result'] = False
        ret['comment'] = 'Termination of instance {0} via the API is disabled.'.format(instance_id)
        return ret

    if __opts__['test']:
        ret['comment'] = 'The instance {0} is set to be deleted.'.format(name)
        ret['result'] = None
        return ret

    r = __salt__['boto_ec2.terminate'](instance_id=instance_id, name=instance_name,
                                       region=region, key=key, keyid=keyid,
                                       profile=profile)
    if not r:
        ret['result'] = False
        ret['comment'] = 'Failed to terminate instance {0}.'.format(instance_id)
        return ret

    ret['changes']['old'] = {'instance_id': instance_id}
    ret['changes']['new'] = None

    if release_eip:
        ip = getattr(instance, 'ip_address', None)
        if ip:
            base_args = {'region': region, 'key': key,
                         'keyid': keyid, 'profile': profile}
            public_ip = None
            alloc_id = None
            assoc_id = None
            if getattr(instance, 'vpc_id', None):
                # VPC instance: EIPs are tracked by allocation/association id.
                r = __salt__['boto_ec2.get_eip_address_info'](addresses=ip, **base_args)
                if r and 'allocation_id' in r[0]:
                    alloc_id = r[0]['allocation_id']
                    assoc_id = r[0].get('association_id')
                else:
                    # I /believe/ this situation is impossible but let's hedge our bets...
                    ret['result'] = False
                    ret['comment'] = "Can't determine AllocationId for address {0}.".format(ip)
                    return ret
            else:
                # EC2-Classic instance: release by public IP.
                # NOTE(review): in this branch 'r' below still holds the
                # terminate() result, so the 'public_ip or r[0][...]'
                # fallbacks rely on public_ip being truthy here - confirm.
                public_ip = instance.ip_address

            if assoc_id:
                # Race here - sometimes the terminate above will already have dropped this
                if not __salt__['boto_ec2.disassociate_eip_address'](association_id=assoc_id,
                                                                     **base_args):
                    log.warning("Failed to disassociate EIP %s.", ip)

            if __salt__['boto_ec2.release_eip_address'](allocation_id=alloc_id,
                                                        public_ip=public_ip,
                                                        **base_args):
                log.info("Released EIP address %s", public_ip or r[0]['public_ip'])
                ret['changes']['old']['public_ip'] = public_ip or r[0]['public_ip']
            else:
                ret['result'] = False
                ret['comment'] = "Failed to release EIP {0}.".format(ip)
                return ret

    return ret


def volume_absent(name, volume_name=None, volume_id=None, instance_name=None,
                  instance_id=None, device=None, region=None, key=None,
                  keyid=None, profile=None):
    '''
    Ensure the EC2 volume is detached and absent.

    .. versionadded:: 2016.11.0

    name
        State definition name.

    volume_name
        Name tag associated with the volume.  For safety, if this matches
        more than one volume, the state will refuse to apply.

    volume_id
        Resource ID of the volume.

    instance_name
        Only remove volume if it is attached to instance with this Name
        tag.  Exclusive with 'instance_id'.  Requires 'device'.

    instance_id
        Only remove volume if it is attached to this instance.
        Exclusive with 'instance_name'.  Requires 'device'.

    device
        Match by device rather than ID.  Requires one of 'instance_name' or
        'instance_id'.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}
           }
    filters = {}
    running_states = ('pending', 'rebooting', 'running', 'stopping', 'stopped')

    if not salt.utils.data.exactly_one((volume_name, volume_id, instance_name, instance_id)):
        raise SaltInvocationError("Exactly one of 'volume_name', 'volume_id', "
                                  "'instance_name', or 'instance_id' must be provided.")
    if (instance_name or instance_id) and not device:
        raise SaltInvocationError("Parameter 'device' is required when either "
                                  "'instance_name' or 'instance_id' is specified.")
    if volume_id:
        filters.update({'volume-id': volume_id})
    if volume_name:
        filters.update({'tag:Name': volume_name})
    if instance_name:
        # Resolve the instance Name tag to an id before filtering volumes.
        instance_id = __salt__['boto_ec2.get_id'](
            name=instance_name, region=region, key=key, keyid=keyid,
            profile=profile, in_states=running_states)
        if not instance_id:
            ret['comment'] = ('Instance with Name {0} not found. Assuming '
                              'associated volumes gone.'.format(instance_name))
            return ret
    if instance_id:
        filters.update({'attachment.instance-id': instance_id})
    if device:
        filters.update({'attachment.device': device})
    args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile}
    vols = __salt__['boto_ec2.get_all_volumes'](filters=filters, **args)
    if not vols:
        # Nothing matched - treat as already absent (idempotent success).
        ret['comment'] = 'Volume matching criteria not found, assuming already absent'
        return ret
    if len(vols) > 1:
        # Refuse to guess which volume to delete.
        msg = "More than one volume matched criteria, can't continue in state {0}".format(name)
        log.error(msg)
        ret['comment'] = msg
        ret['result'] = False
        return ret
    vol = vols[0]
    log.info('Matched Volume ID %s', vol)

    if __opts__['test']:
        ret['comment'] = 'The volume {0} is set to be deleted.'.format(vol)
        ret['result'] = None
        return ret
    if __salt__['boto_ec2.delete_volume'](volume_id=vol, force=True, **args):
        ret['comment'] = 'Volume {0} deleted.'.format(vol)
        ret['changes'] = {'old': {'volume_id': vol}, 'new': {'volume_id': None}}
    else:
        ret['comment'] = 'Error deleting volume {0}.'.format(vol)
        ret['result'] = False
    return ret


def volumes_tagged(name, tag_maps, authoritative=False, region=None, key=None,
                   keyid=None, profile=None):
    '''
    Ensure EC2 volume(s) matching the given filters have the defined tags.

    .. versionadded:: 2016.11.0

    name
        State definition name.

    tag_maps
        List of dicts of filters and tags, where 'filters' is a dict suitable
        for passing to the 'filters' argument of boto_ec2.get_all_volumes(),
        and 'tags' is a dict of tags to be set on volumes as matched by the
        given filters.  The filter syntax is extended to permit passing either
        a list of volume_ids or an instance_name (with instance_name being the
        Name tag of the instance to which the desired volumes are mapped).
        Each mapping in the list is applied separately, so multiple sets of
        volumes can be all tagged differently with one call to this function.

    YAML example fragment:

    .. code-block:: yaml

        - filters:
            attachment.instance_id: i-abcdef12
          tags:
            Name: dev-int-abcdef12.aws-foo.com
        - filters:
            attachment.device: /dev/sdf
          tags:
            ManagedSnapshots: true
            BillingGroup: bubba.hotep@aws-foo.com
        - filters:
            instance_name: prd-foo-01.aws-foo.com
          tags:
            Name: prd-foo-01.aws-foo.com
            BillingGroup: infra-team@aws-foo.com
        - filters:
            volume_ids: [ vol-12345689, vol-abcdef12 ]
          tags:
            BillingGroup: infra-team@aws-foo.com

    authoritative
        Should un-declared tags currently set on matched volumes be deleted?
        Boolean.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
    '''
    ret = {'name': name,
           'result': True,
           'comment': '',
           'changes': {}
           }
    args = {'tag_maps': tag_maps, 'authoritative': authoritative,
            'region': region, 'key': key, 'keyid': keyid, 'profile': profile}

    if __opts__['test']:
        # Dry-run pass: report what would change, never mutate.
        args['dry_run'] = True
        r = __salt__['boto_ec2.set_volumes_tags'](**args)
        if r['success']:
            if r.get('changes'):
                ret['comment'] = 'Tags would be updated.'
                ret['changes'] = r['changes']
                ret['result'] = None
        else:
            ret['comment'] = 'Error validating requested volume tags.'
            ret['result'] = False
        return ret
    # Real pass: apply the tag maps.
    r = __salt__['boto_ec2.set_volumes_tags'](**args)
    if r['success']:
        if r.get('changes'):
            ret['comment'] = 'Tags applied.'
            ret['changes'] = r['changes']
    else:
        ret['comment'] = 'Error updating requested volume tags.'
        ret['result'] = False
    return ret


def volume_present(name, volume_name=None, volume_id=None, instance_name=None,
                   instance_id=None, device=None, size=None, snapshot_id=None,
                   volume_type=None, iops=None, encrypted=False, kms_key_id=None,
                   region=None, key=None, keyid=None, profile=None):
    '''
    Ensure the EC2 volume is present and attached.

    name
        State definition name.

    volume_name
        The Name tag value for the volume.  If no volume with that matching
        name tag is found, a new volume will be created.
If multiple volumes are matched, the state will fail. volume_id Resource ID of the volume. Exclusive with 'volume_name'. instance_name Attach volume to instance with this Name tag. Exclusive with 'instance_id'. instance_id Attach volume to instance with this ID. Exclusive with 'instance_name'. device The device on the instance through which the volume is exposed (e.g. /dev/sdh) size The size of the new volume, in GiB. If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. snapshot_id The snapshot ID from which the new Volume will be created. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. volume_type The type of the volume. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. Valid volume types for AWS can be found here: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html iops The provisioned IOPS you want to associate with this volume. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. encrypted Specifies whether the volume should be encrypted. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. kms_key_id If encrypted is True, this KMS Key ID may be specified to encrypt volume with this key. Optionally specified at volume creation time; will be ignored afterward. Requires 'volume_name'. e.g.: arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef region Region to connect to. key Secret key to be used. keyid Access key to be used. profile A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} old_dict = {} new_dict = {} running_states = ('running', 'stopped') if not salt.utils.data.exactly_one((volume_name, volume_id)): raise SaltInvocationError("Exactly one of 'volume_name', 'volume_id', " " must be provided.") if not salt.utils.data.exactly_one((instance_name, instance_id)): raise SaltInvocationError("Exactly one of 'instance_name', or 'instance_id'" " must be provided.") if device is None: raise SaltInvocationError("Parameter 'device' is required.") args = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile} if instance_name: instance_id = __salt__['boto_ec2.get_id']( name=instance_name, in_states=running_states, **args) if not instance_id: raise SaltInvocationError('Instance with Name {0} not found.'.format(instance_name)) instances = __salt__['boto_ec2.find_instances'](instance_id=instance_id, return_objs=True, **args) instance = instances[0] if volume_name: filters = {} filters.update({'tag:Name': volume_name}) vols = __salt__['boto_ec2.get_all_volumes'](filters=filters, **args) if len(vols) > 1: msg = "More than one volume matched volume name {0}, can't continue in state {1}".format(volume_name, name) raise SaltInvocationError(msg) if not vols: if __opts__['test']: ret['comment'] = ('The volume with name {0} is set to be created and attached' ' on {1}({2}).'.format(volume_name, instance_id, device)) ret['result'] = None return ret _rt = __salt__['boto_ec2.create_volume'](zone_name=instance.placement, size=size, snapshot_id=snapshot_id, volume_type=volume_type, iops=iops, encrypted=encrypted, kms_key_id=kms_key_id, wait_for_creation=True, **args) if 'result' in _rt: volume_id = _rt['result'] else: raise SaltInvocationError('Error creating volume with name {0}.'.format(volume_name)) _rt = __salt__['boto_ec2.set_volumes_tags'](tag_maps=[{ 'filters': {'volume_ids': [volume_id]}, 'tags': {'Name': volume_name} }], **args) if _rt['success'] is False: raise 
SaltInvocationError('Error updating requested volume ' '{0} with name {1}. {2}'.format(volume_id, volume_name, _rt['comment'])) old_dict['volume_id'] = None new_dict['volume_id'] = volume_id else: volume_id = vols[0] vols = __salt__['boto_ec2.get_all_volumes'](volume_ids=[volume_id], return_objs=True, **args) if not vols: raise SaltInvocationError('Volume {0} do not exist'.format(volume_id)) vol = vols[0] if vol.zone != instance.placement: raise SaltInvocationError(('Volume {0} in {1} cannot attach to instance' ' {2} in {3}.').format(volume_id, vol.zone, instance_id, instance.placement)) attach_data = vol.attach_data if attach_data is not None and attach_data.instance_id is not None: if instance_id == attach_data.instance_id and device == attach_data.device: ret['comment'] = 'The volume {0} is attached on {1}({2}).'.format(volume_id, instance_id, device) return ret else: if __opts__['test']: ret['comment'] = ('The volume {0} is set to be detached' ' from {1}({2} and attached on {3}({4}).').format(attach_data.instance_id, attach_data.devic, volume_id, instance_id, device) ret['result'] = None return ret if __salt__['boto_ec2.detach_volume'](volume_id=volume_id, wait_for_detachement=True, **args): ret['comment'] = 'Volume {0} is detached from {1}({2}).'.format(volume_id, attach_data.instance_id, attach_data.device) old_dict['instance_id'] = attach_data.instance_id old_dict['device'] = attach_data.device else: raise SaltInvocationError(('The volume {0} is already attached on instance {1}({2}).' 
' Failed to detach').format(volume_id, attach_data.instance_id, attach_data.device)) else: old_dict['instance_id'] = instance_id old_dict['device'] = None if __opts__['test']: ret['comment'] = 'The volume {0} is set to be attached on {1}({2}).'.format(volume_id, instance_id, device) ret['result'] = None return ret if __salt__['boto_ec2.attach_volume'](volume_id=volume_id, instance_id=instance_id, device=device, **args): ret['comment'] = ' '.join([ ret['comment'], 'Volume {0} is attached on {1}({2}).'.format(volume_id, instance_id, device)]) new_dict['instance_id'] = instance_id new_dict['device'] = device ret['changes'] = {'old': old_dict, 'new': new_dict} else: ret['comment'] = 'Error attaching volume {0} to instance {1}({2}).'.format(volume_id, instance_id, device) ret['result'] = False return ret def private_ips_present(name, network_interface_name=None, network_interface_id=None, private_ip_addresses=None, allow_reassignment=False, region=None, key=None, keyid=None, profile=None): ''' Ensure an ENI has secondary private ip addresses associated with it name (String) - State definition name network_interface_id (String) - The EC2 network interface id, example eni-123456789 private_ip_addresses (List or String) - The secondary private ip address(es) that should be present on the ENI. allow_reassignment (Boolean) - If true, will reassign a secondary private ip address associated with another ENI. If false, state will fail if the secondary private ip address is associated with another ENI. region (string) - Region to connect to. key (string) - Secret key to be used. keyid (string) - Access key to be used. profile (variable) - A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid. 
''' if not salt.utils.data.exactly_one((network_interface_name, network_interface_id)): raise SaltInvocationError("Exactly one of 'network_interface_name', " "'network_interface_id' must be provided") if not private_ip_addresses: raise SaltInvocationError("You must provide the private_ip_addresses to associate with the " "ENI") ret = { 'name': name, 'result': True, 'comment': '', 'changes': {'old': [], 'new': []} } get_eni_args = { 'name': network_interface_name, 'network_interface_id': network_interface_id, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args) # Check if there are any new secondary private ips to add to the eni if eni and eni.get('result', {}).get('private_ip_addresses'): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['old'].append(eni_pip['private_ip_address']) ips_to_add = [] for private_ip in private_ip_addresses: if private_ip not in ret['changes']['old']: ips_to_add.append(private_ip) if ips_to_add: if not __opts__['test']: # Assign secondary private ips to ENI assign_ips_args = { 'network_interface_id': network_interface_id, 'private_ip_addresses': ips_to_add, 'allow_reassignment': allow_reassignment, 'region': region, 'key': key, 'keyid': keyid, 'profile': profile } __salt__['boto_ec2.assign_private_ip_addresses'](**assign_ips_args) # Verify secondary private ips were properly assigned to ENI eni = __salt__['boto_ec2.get_network_interface'](**get_eni_args) if eni and eni.get('result', {}).get('private_ip_addresses', None): for eni_pip in eni['result']['private_ip_addresses']: ret['changes']['new'].append(eni_pip['private_ip_address']) ips_not_added = [] for private_ip in private_ip_addresses: if private_ip not in ret['changes']['new']: ips_not_added.append(private_ip) # Display results if ips_not_added: ret['result'] = False ret['comment'] = ('ips on eni: {0}\n' 'attempted to add: {1}\n' 'could not add the following ips: {2}\n'.format( '\n\t- 
' + '\n\t- '.join(ret['changes']['new']), '\n\t- ' + '\n\t- '.join(ips_to_add), '\n\t- ' + '\n\t- '.join(ips_not_added))) else: ret['comment'] = "added ips: {0}".format( '\n\t- ' + '\n\t- '.join(ips_to_add)) # Verify there were changes if ret['changes']['old'] == ret['changes']['new']: ret['changes'] = {} else: # Testing mode, show that there were ips to add ret['comment'] = ('ips on eni: {0}\n' 'ips that would be added: {1}\n'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old']), '\n\t- ' + '\n\t- '.join(ips_to_add))) ret['changes'] = {} ret['result'] = None else: ret['comment'] = 'ips on eni: {0}'.format( '\n\t- ' + '\n\t- '.join(ret['changes']['old'])) # there were no changes since we did not attempt to remove ips ret['changes'] = {} return ret
saltstack/salt
salt/utils/docker/translate/network.py
_post_processing
python
def _post_processing(kwargs, skip_translate, invalid):  # pylint: disable=unused-argument
    '''
    Additional network-specific post-translation processing.

    Mutates ``kwargs`` in place, filling in any of the module-level
    ``DEFAULTS`` that the user did not explicitly pass. ``skip_translate``
    and ``invalid`` are accepted only to satisfy the common post-processing
    hook signature shared by the translate modules; they are unused here.
    '''
    # If any defaults were not explicitly passed, add them.
    # setdefault only assigns when the key is absent, matching the
    # original "if item not in kwargs" check.
    for item, default in DEFAULTS.items():
        kwargs.setdefault(item, default)
Additional network-specific post-translation processing
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/docker/translate/network.py#L33-L40
null
# -*- coding: utf-8 -*-
'''
Input translation for docker-py network creation arguments
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals

# Import Salt libs
from salt.exceptions import SaltInvocationError

# Import 3rd-party libs
from salt.ext import six

# Import helpers
from . import helpers

ALIASES = {
    'driver_opt': 'options',
    'driver_opts': 'options',
    'ipv6': 'enable_ipv6',
}
IPAM_ALIASES = {
    'ip_range': 'iprange',
    'aux_address': 'aux_addresses',
}
# ALIASES is a superset of IPAM_ALIASES
ALIASES.update(IPAM_ALIASES)
ALIASES_REVMAP = {real: alias for alias, real in six.iteritems(ALIASES)}
DEFAULTS = {'check_duplicate': True}


# Functions below must match names of docker-py arguments
def driver(val, **kwargs):  # pylint: disable=unused-argument
    return helpers.translate_str(val)


def options(val, **kwargs):  # pylint: disable=unused-argument
    return helpers.translate_key_val(val, delimiter='=')


def ipam(val, **kwargs):  # pylint: disable=unused-argument
    return helpers.translate_dict(val)


def check_duplicate(val, **kwargs):  # pylint: disable=unused-argument
    return helpers.translate_bool(val)


def internal(val, **kwargs):  # pylint: disable=unused-argument
    return helpers.translate_bool(val)


def labels(val, **kwargs):  # pylint: disable=unused-argument
    return helpers.translate_labels(val)


def enable_ipv6(val, **kwargs):  # pylint: disable=unused-argument
    return helpers.translate_bool(val)


def attachable(val, **kwargs):  # pylint: disable=unused-argument
    return helpers.translate_bool(val)


def ingress(val, **kwargs):  # pylint: disable=unused-argument
    return helpers.translate_bool(val)


# IPAM args
def ipam_driver(val, **kwargs):  # pylint: disable=unused-argument
    return driver(val, **kwargs)


def ipam_opts(val, **kwargs):  # pylint: disable=unused-argument
    return options(val, **kwargs)


def ipam_pools(val, **kwargs):  # pylint: disable=unused-argument
    # Can't do a simple dictlist check because each dict may have more than
    # one element.
    if not hasattr(val, '__iter__') \
            or not all(isinstance(pool, dict) for pool in val):
        raise SaltInvocationError('ipam_pools must be a list of dictionaries')
    skip = kwargs.get('skip_translate', ())
    if skip is True or 'ipam_pools' in skip:
        return val
    namespace = globals()
    for pool in val:
        for key in list(pool):
            if key in skip:
                continue
            # Resolve aliases here, since alias resolution wouldn't have
            # been done within the individual IPAM dicts
            if key in IPAM_ALIASES:
                pool[IPAM_ALIASES[key]] = pool.pop(key)
                key = IPAM_ALIASES[key]
            if key in namespace:
                pool[key] = namespace[key](pool[key])
    return val


def subnet(val, **kwargs):  # pylint: disable=unused-argument
    val = helpers.translate_str(val)
    if kwargs.get('validate_ip_addrs', True):
        helpers.validate_subnet(val)
    return val


def iprange(val, **kwargs):  # pylint: disable=unused-argument
    val = helpers.translate_str(val)
    if kwargs.get('validate_ip_addrs', True):
        helpers.validate_subnet(val)
    return val


def gateway(val, **kwargs):  # pylint: disable=unused-argument
    val = helpers.translate_str(val)
    if kwargs.get('validate_ip_addrs', True):
        helpers.validate_ip(val)
    return val


def aux_addresses(val, **kwargs):  # pylint: disable=unused-argument
    val = helpers.translate_key_val(val, delimiter='=')
    if kwargs.get('validate_ip_addrs', True):
        for addr in six.itervalues(val):
            helpers.validate_ip(addr)
    return val
saltstack/salt
salt/client/__init__.py
get_local_client
python
def get_local_client(
        c_path=os.path.join(syspaths.CONFIG_DIR, 'master'),
        mopts=None,
        skip_perm_errors=False,
        io_loop=None,
        auto_reconnect=False):
    '''
    .. versionadded:: 2014.7.0

    Read in the config and return the correct LocalClient object based on
    the configured transport

    :param IOLoop io_loop: io_loop used for events.
                           Pass in an io_loop if you want asynchronous
                           operation for obtaining events. Eg use of
                           set_event_handler() API. Otherwise, operation
                           will be synchronous.
    '''
    opts = mopts
    if not opts:
        # Late import to prevent circular import
        import salt.config
        opts = salt.config.client_config(c_path)

    # TODO: AIO core is separate from transport
    return LocalClient(
        mopts=opts,
        skip_perm_errors=skip_perm_errors,
        io_loop=io_loop,
        auto_reconnect=auto_reconnect)
.. versionadded:: 2014.7.0 Read in the config and return the correct LocalClient object based on the configured transport :param IOLoop io_loop: io_loop used for events. Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L76-L106
[ "def client_config(path, env_var='SALT_CLIENT_CONFIG', defaults=None):\n '''\n Load Master configuration data\n\n Usage:\n\n .. code-block:: python\n\n import salt.config\n master_opts = salt.config.client_config('/etc/salt/master')\n\n Returns a dictionary of the Salt Master configuration file with necessary\n options needed to communicate with a locally-running Salt Master daemon.\n This function searches for client specific configurations and adds them to\n the data from the master configuration.\n\n This is useful for master-side operations like\n :py:class:`~salt.client.LocalClient`.\n '''\n if defaults is None:\n defaults = DEFAULT_MASTER_OPTS.copy()\n\n xdg_dir = salt.utils.xdg.xdg_config_dir()\n if os.path.isdir(xdg_dir):\n client_config_dir = xdg_dir\n saltrc_config_file = 'saltrc'\n else:\n client_config_dir = os.path.expanduser('~')\n saltrc_config_file = '.saltrc'\n\n # Get the token file path from the provided defaults. If not found, specify\n # our own, sane, default\n opts = {\n 'token_file': defaults.get(\n 'token_file',\n os.path.join(client_config_dir, 'salt_token')\n )\n }\n # Update options with the master configuration, either from the provided\n # path, salt's defaults or provided defaults\n opts.update(\n master_config(path, defaults=defaults)\n )\n # Update with the users salt dot file or with the environment variable\n saltrc_config = os.path.join(client_config_dir, saltrc_config_file)\n opts.update(\n load_config(\n saltrc_config,\n env_var,\n saltrc_config\n )\n )\n # Make sure we have a proper and absolute path to the token file\n if 'token_file' in opts:\n opts['token_file'] = os.path.abspath(\n os.path.expanduser(\n opts['token_file']\n )\n )\n # If the token file exists, read and store the contained token\n if os.path.isfile(opts['token_file']):\n # Make sure token is still valid\n expire = opts.get('token_expire', 43200)\n if os.stat(opts['token_file']).st_mtime + expire > time.mktime(time.localtime()):\n with 
salt.utils.files.fopen(opts['token_file']) as fp_:\n opts['token'] = fp_.read().strip()\n # On some platforms, like OpenBSD, 0.0.0.0 won't catch a master running on localhost\n if opts['interface'] == '0.0.0.0':\n opts['interface'] = '127.0.0.1'\n\n # Make sure the master_uri is set\n if 'master_uri' not in opts:\n opts['master_uri'] = 'tcp://{ip}:{port}'.format(\n ip=salt.utils.zeromq.ip_bracket(opts['interface']),\n port=opts['ret_port']\n )\n\n # Return the client options\n _validate_opts(opts)\n return opts\n" ]
# -*- coding: utf-8 -*- ''' The client module is used to create a client connection to the publisher The data structure needs to be: {'enc': 'clear', 'load': {'fun': '<mod.callable>', 'arg':, ('arg1', 'arg2', ...), 'tgt': '<glob or id>', 'key': '<read in the key file>'} ''' # The components here are simple, and they need to be and stay simple, we # want a client to have 3 external concerns, and maybe a forth configurable # option. # The concerns are: # 1. Who executes the command? # 2. What is the function being run? # 3. What arguments need to be passed to the function? # 4. How long do we wait for all of the replies? # # Import python libs from __future__ import absolute_import, print_function, unicode_literals import os import time import random import logging from datetime import datetime # Import salt libs import salt.config import salt.cache import salt.defaults.exitcodes import salt.payload import salt.transport.client import salt.loader import salt.utils.args import salt.utils.event import salt.utils.files import salt.utils.jid import salt.utils.minions import salt.utils.platform import salt.utils.stringutils import salt.utils.user import salt.utils.verify import salt.utils.zeromq import salt.syspaths as syspaths from salt.exceptions import ( AuthenticationError, AuthorizationError, EauthAuthenticationError, PublishError, SaltInvocationError, SaltReqTimeoutError, SaltClientError ) # Import third party libs from salt.ext import six # pylint: disable=import-error # Try to import range from https://github.com/ytoolshed/range HAS_RANGE = False try: import seco.range HAS_RANGE = True except ImportError: pass # pylint: enable=import-error # Import tornado import tornado.gen # pylint: disable=F0401 log = logging.getLogger(__name__) class LocalClient(object): ''' The interface used by the :command:`salt` CLI tool on the Salt Master ``LocalClient`` is used to send a command to Salt minions to execute :ref:`execution modules <all-salt.modules>` and return the results 
to the Salt Master. Importing and using ``LocalClient`` must be done on the same machine as the Salt Master and it must be done using the same user that the Salt Master is running as. (Unless :conf_master:`external_auth` is configured and authentication credentials are included in the execution). .. note:: The LocalClient uses a Tornado IOLoop, this can create issues when using the LocalClient inside an existing IOLoop. If creating the LocalClient in partnership with another IOLoop either create the IOLoop before creating the LocalClient, or when creating the IOLoop use ioloop.current() which will return the ioloop created by LocalClient. .. code-block:: python import salt.client local = salt.client.LocalClient() local.cmd('*', 'test.fib', [10]) ''' def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, 'master'), mopts=None, skip_perm_errors=False, io_loop=None, keep_loop=False, auto_reconnect=False): ''' :param IOLoop io_loop: io_loop used for events. Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. 
''' if mopts: self.opts = mopts else: if os.path.isdir(c_path): log.warning( '%s expects a file path not a directory path(%s) to ' 'its \'c_path\' keyword argument', self.__class__.__name__, c_path ) self.opts = salt.config.client_config(c_path) self.serial = salt.payload.Serial(self.opts) self.salt_user = salt.utils.user.get_specific_user() self.skip_perm_errors = skip_perm_errors self.key = self.__read_master_key() self.auto_reconnect = auto_reconnect self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False, io_loop=io_loop, keep_loop=keep_loop) self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils) self.returners = salt.loader.returners(self.opts, self.functions) def __read_master_key(self): ''' Read in the rotating master authentication key ''' key_user = self.salt_user if key_user == 'root': if self.opts.get('user', 'root') != 'root': key_user = self.opts.get('user', 'root') if key_user.startswith('sudo_'): key_user = self.opts.get('user', 'root') if salt.utils.platform.is_windows(): # The username may contain '\' if it is in Windows # 'DOMAIN\username' format. Fix this for the keyfile path. 
key_user = key_user.replace('\\', '_') keyfile = os.path.join(self.opts['cachedir'], '.{0}_key'.format(key_user)) try: # Make sure all key parent directories are accessible salt.utils.verify.check_path_traversal(self.opts['cachedir'], key_user, self.skip_perm_errors) with salt.utils.files.fopen(keyfile, 'r') as key: return salt.utils.stringutils.to_unicode(key.read()) except (OSError, IOError, SaltClientError): # Fall back to eauth return '' def _convert_range_to_list(self, tgt): ''' convert a seco.range range into a list target ''' range_ = seco.range.Range(self.opts['range_server']) try: return range_.expand(tgt) except seco.range.RangeException as err: print('Range server exception: {0}'.format(err)) return [] def _get_timeout(self, timeout): ''' Return the timeout to use ''' if timeout is None: return self.opts['timeout'] if isinstance(timeout, int): return timeout if isinstance(timeout, six.string_types): try: return int(timeout) except ValueError: return self.opts['timeout'] # Looks like the timeout is invalid, use config return self.opts['timeout'] def gather_job_info(self, jid, tgt, tgt_type, listen=True, **kwargs): ''' Return the information about a given job ''' log.debug('Checking whether jid %s is still running', jid) timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) pub_data = self.run_job(tgt, 'saltutil.find_job', arg=[jid], tgt_type=tgt_type, timeout=timeout, listen=listen, **kwargs ) if 'jid' in pub_data: self.event.subscribe(pub_data['jid']) return pub_data def _check_pub_data(self, pub_data, listen=True): ''' Common checks on the pub_data data structure returned from running pub ''' if pub_data == '': # Failed to authenticate, this could be a bunch of things raise EauthAuthenticationError( 'Failed to authenticate! This is most likely because this ' 'user is not permitted to execute commands, but there is a ' 'small possibility that a disk error occurred (check ' 'disk/inode usage).' 
) # Failed to connect to the master and send the pub if 'error' in pub_data: print(pub_data['error']) log.debug('_check_pub_data() error: %s', pub_data['error']) return {} elif 'jid' not in pub_data: return {} if pub_data['jid'] == '0': print('Failed to connect to the Master, ' 'is the Salt Master running?') return {} # If we order masters (via a syndic), don't short circuit if no minions # are found if not self.opts.get('order_masters'): # Check for no minions if not pub_data['minions']: print('No minions matched the target. ' 'No command was sent, no jid was assigned.') return {} # don't install event subscription listeners when the request is asynchronous # and doesn't care. this is important as it will create event leaks otherwise if not listen: return pub_data if self.opts.get('order_masters'): self.event.subscribe('syndic/.*/{0}'.format(pub_data['jid']), 'regex') self.event.subscribe('salt/job/{0}'.format(pub_data['jid'])) return pub_data def run_job( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=False, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = self.pub( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) return self._check_pub_data(pub_data, listen=listen) def gather_minions(self, tgt, expr_form): _res = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form) return _res['minions'] @tornado.gen.coroutine def run_job_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=True, io_loop=None, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job_async('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = yield self.pub_async( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), io_loop=io_loop, listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) raise tornado.gen.Return(self._check_pub_data(pub_data, listen=listen)) def cmd_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', kwarg=None, **kwargs): ''' Asynchronously send a command to connected minions The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A job ID or 0 on failure. .. code-block:: python >>> local.cmd_async('*', 'test.sleep', [300]) '20131219215921857715' ''' pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, jid=jid, kwarg=kwarg, listen=False, **kwargs) try: return pub_data['jid'] except KeyError: return 0 def cmd_subset( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, sub=3, cli=False, progress=False, full_return=False, **kwargs): ''' Execute a command on a random subset of the targeted systems The function signature is the same as :py:meth:`cmd` with the following exceptions. :param sub: The number of systems to execute on :param cli: When this is set to True, a generator is returned, otherwise a dictionary of the minion returns is returned .. 
code-block:: python >>> SLC.cmd_subset('*', 'test.ping', sub=1) {'jerry': True} ''' minion_ret = self.cmd(tgt, 'sys.list_functions', tgt_type=tgt_type, **kwargs) minions = list(minion_ret) random.shuffle(minions) f_tgt = [] for minion in minions: if fun in minion_ret[minion]: f_tgt.append(minion) if len(f_tgt) >= sub: break func = self.cmd if cli: func = self.cmd_cli return func( f_tgt, fun, arg, tgt_type='list', ret=ret, kwarg=kwarg, progress=progress, full_return=full_return, **kwargs) def cmd_batch( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, batch='10%', **kwargs): ''' Iteratively execute a command on subsets of minions at a time The function signature is the same as :py:meth:`cmd` with the following exceptions. :param batch: The batch identifier of systems to execute on :returns: A generator of minion returns .. code-block:: python >>> returns = local.cmd_batch('*', 'state.highstate', batch='10%') >>> for ret in returns: ... print(ret) {'jerry': {...}} {'dave': {...}} {'stewart': {...}} ''' # Late import - not used anywhere else in this file import salt.cli.batch opts = salt.cli.batch.batch_get_opts( tgt, fun, batch, self.opts, arg=arg, tgt_type=tgt_type, ret=ret, kwarg=kwarg, **kwargs) eauth = salt.cli.batch.batch_get_eauth(kwargs) arg = salt.utils.args.parse_input(arg, kwargs=kwarg) opts = {'tgt': tgt, 'fun': fun, 'arg': arg, 'tgt_type': tgt_type, 'ret': ret, 'batch': batch, 'failhard': kwargs.get('failhard', False), 'raw': kwargs.get('raw', False)} if 'timeout' in kwargs: opts['timeout'] = kwargs['timeout'] if 'gather_job_timeout' in kwargs: opts['gather_job_timeout'] = kwargs['gather_job_timeout'] if 'batch_wait' in kwargs: opts['batch_wait'] = int(kwargs['batch_wait']) eauth = {} if 'eauth' in kwargs: eauth['eauth'] = kwargs.pop('eauth') if 'username' in kwargs: eauth['username'] = kwargs.pop('username') if 'password' in kwargs: eauth['password'] = kwargs.pop('password') if 'token' in kwargs: eauth['token'] = kwargs.pop('token') for key, 
val in six.iteritems(self.opts): if key not in opts: opts[key] = val batch = salt.cli.batch.Batch(opts, eauth=eauth, quiet=True) for ret in batch.run(): yield ret def cmd(self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', jid='', full_return=False, kwarg=None, **kwargs): ''' Synchronously execute a command on targeted minions The cmd method will execute and wait for the timeout period for all minions to reply, then it will return all minion data at once. .. code-block:: python >>> import salt.client >>> local = salt.client.LocalClient() >>> local.cmd('*', 'cmd.run', ['whoami']) {'jerry': 'root'} With extra keyword arguments for the command function to be run: .. code-block:: python local.cmd('*', 'test.arg', ['arg1', 'arg2'], kwarg={'foo': 'bar'}) Compound commands can be used for multiple executions in a single publish. Function names and function arguments are provided in separate lists but the index values must correlate and an empty list must be used if no arguments are required. .. code-block:: python >>> local.cmd('*', [ 'grains.items', 'sys.doc', 'cmd.run', ], [ [], [], ['uptime'], ]) :param tgt: Which minions to target for the execution. Default is shell glob. Modified by the ``tgt_type`` option. :type tgt: string or list :param fun: The module and function to call on the specified minions of the form ``module.function``. For example ``test.ping`` or ``grains.items``. Compound commands Multiple functions may be called in a single publish by passing a list of commands. This can dramatically lower overhead and speed up the application communicating with Salt. This requires that the ``arg`` param is a list of lists. The ``fun`` list and the ``arg`` list must correlate by index meaning a function that does not take arguments must still have a corresponding empty list at the expected index. :type fun: string or list of strings :param arg: A list of arguments to pass to the remote function. 
If the function takes no arguments ``arg`` may be omitted except when executing a compound command. :type arg: list or list-of-lists :param timeout: Seconds to wait after the last minion returns but before all minions return. :param tgt_type: The type of ``tgt``. Allowed values: * ``glob`` - Bash glob completion - Default * ``pcre`` - Perl style regular expression * ``list`` - Python list of hosts * ``grain`` - Match based on a grain comparison * ``grain_pcre`` - Grain comparison with a regex * ``pillar`` - Pillar data comparison * ``pillar_pcre`` - Pillar data comparison with a regex * ``nodegroup`` - Match on nodegroup * ``range`` - Use a Range server for matching * ``compound`` - Pass a compound match string * ``ipcidr`` - Match based on Subnet (CIDR notation) or IPv4 address. .. versionchanged:: 2017.7.0 Renamed from ``expr_form`` to ``tgt_type`` :param ret: The returner to use. The value passed can be single returner, or a comma delimited list of returners to call in order on the minions :param kwarg: A dictionary with keyword arguments for the function. :param full_return: Output the job return only (default) or the full return including exit code and other job metadata. :param kwargs: Optional keyword arguments. Authentication credentials may be passed when using :conf_master:`external_auth`. For example: ``local.cmd('*', 'test.ping', username='saltdev', password='saltdev', eauth='pam')``. Or: ``local.cmd('*', 'test.ping', token='5871821ea51754fdcea8153c1c745433')`` :returns: A dictionary with the result of the execution, keyed by minion ID. A compound command will return a sub-dictionary keyed by function name. 
''' was_listening = self.event.cpub try: pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, timeout, jid, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data ret = {} for fn_ret in self.get_cli_event_returns( pub_data['jid'], pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, **kwargs): if fn_ret: for mid, data in six.iteritems(fn_ret): ret[mid] = (data if full_return else data.get('ret', {})) for failed in list(set(pub_data['minions']) - set(ret)): ret[failed] = False return ret finally: if not was_listening: self.event.close_pub() def cmd_cli( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, progress=False, **kwargs): ''' Used by the :command:`salt` CLI. This method returns minion returns as they come back and attempts to block until all minions return. The function signature is the same as :py:meth:`cmd` with the following exceptions. :param verbose: Print extra information about the running command :returns: A generator ''' was_listening = self.event.cpub if fun.startswith('state.'): ref = {'compound': '-C', 'glob': '', 'grain': '-G', 'grain_pcre': '-P', 'ipcidr': '-S', 'list': '-L', 'nodegroup': '-N', 'pcre': '-E', 'pillar': '-I', 'pillar_pcre': '-J'} if HAS_RANGE: ref['range'] = '-R' if ref[tgt_type].startswith('-'): self.target_data = "{0} '{1}'".format( ref[tgt_type], ','.join(tgt) if isinstance(tgt, list) else tgt) else: self.target_data = ','.join(tgt) if isinstance(tgt, list) else tgt else: self.target_data = '' try: self.pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not self.pub_data: yield self.pub_data else: try: for fn_ret in self.get_cli_event_returns( self.pub_data['jid'], self.pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, verbose, progress, **kwargs): if not fn_ret: continue yield fn_ret except KeyboardInterrupt: exit_msg = ( '\nExiting gracefully on Ctrl-c' '\n' 'This job\'s jid is: {0}\n' 'The 
minions may not have all finished running and any ' 'remaining minions will return upon completion.\n\n' 'To look up the return data for this job later, run the ' 'following command:\n' 'salt-run jobs.lookup_jid {0}'.format(self.pub_data['jid'])) if self.target_data: exit_msg += ( '\n\n' 'To set up the state run to safely exit, run the following command:\n' 'salt {0} state.soft_kill {1}'.format(self.target_data, self.pub_data['jid'])) raise SystemExit(exit_msg) finally: if not was_listening: self.event.close_pub() def cmd_iter( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, **kwargs): ''' Yields the individual minion returns as they come in The function signature is the same as :py:meth:`cmd` with the following exceptions. Normally :py:meth:`cmd_iter` does not yield results for minions that are not connected. If you want it to return results for disconnected minions set `expect_minions=True` in `kwargs`. :return: A generator yielding the individual minion returns .. code-block:: python >>> ret = local.cmd_iter('*', 'test.ping') >>> for i in ret: ... print(i) {'jerry': {'ret': True}} {'dave': {'ret': True}} {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: if kwargs.get('yield_pub_data'): yield pub_data for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=self._get_timeout(timeout), tgt=tgt, tgt_type=tgt_type, **kwargs): if not fn_ret: continue yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_iter_no_block( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, show_jid=False, verbose=False, **kwargs): ''' Yields the individual minion returns as they come in, or None when no returns are available. 
The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A generator yielding the individual minion returns, or None when no returns are available. This allows for actions to be injected in between minion returns. .. code-block:: python >>> ret = local.cmd_iter_no_block('*', 'test.ping') >>> for i in ret: ... print(i) None {'jerry': {'ret': True}} {'dave': {'ret': True}} None {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=timeout, tgt=tgt, tgt_type=tgt_type, block=False, **kwargs): if fn_ret and any([show_jid, verbose]): for minion in fn_ret: fn_ret[minion]['jid'] = pub_data['jid'] yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_full_return( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, **kwargs): ''' Execute a salt command and return ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data return (self.get_cli_static_event_returns(pub_data['jid'], pub_data['minions'], timeout, tgt, tgt_type, verbose)) finally: if not was_listening: self.event.close_pub() def get_cli_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_jid=False, **kwargs): ''' Starts a watcher looking at the return data for a specified JID :returns: all of the information for the JID ''' if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] fret = {} # make sure the minions is a set (since we do set operations on it) 
minions = set(minions) found = set() # start this before the cache lookup-- in case new stuff comes in event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) # get the info from the cache ret = self.get_cache_returns(jid) if ret != {}: found.update(set(ret)) yield ret # if you have all the returns, stop if len(found.intersection(minions)) >= len(minions): raise StopIteration() # otherwise, get them from the event system for event in event_iter: if event != {}: found.update(set(event)) yield event if len(found.intersection(minions)) >= len(minions): self._clean_up_subscriptions(jid) raise StopIteration() # TODO: tests!! def get_returns_no_block( self, tag, match_type=None): ''' Raw function to just return events of jid excluding timeout logic Yield either the raw event data or None Pass a list of additional regular expressions as `tags_regex` to search the event bus for non-return data, such as minion lists returned from syndics. ''' while True: raw = self.event.get_event(wait=0.01, tag=tag, match_type=match_type, full=True, no_block=True, auto_reconnect=self.auto_reconnect) yield raw def get_iter_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', expect_minions=False, block=True, **kwargs): ''' Watch the event system and return job data as it comes in :returns: all of the information for the JID ''' if not isinstance(minions, set): if isinstance(minions, six.string_types): minions = set([minions]) elif isinstance(minions, (list, tuple)): minions = set(list(minions)) if timeout is None: timeout = self.opts['timeout'] gather_job_timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) start = int(time.time()) # timeouts per minion, id_ -> timeout time minion_timeouts = {} found = set() missing = set() # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the 
iteration, since the jid is invalid raise StopIteration() except Exception as exc: log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG) # Wait for the hosts to check in last_time = False # iterator for this job's return if self.opts['order_masters']: # If we are a MoM, we need to gather expected minions from downstreams masters. ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex') else: ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid)) # iterator for the info of this job jinfo_iter = [] # open event jids that need to be un-subscribed from later open_jids = set() timeout_at = time.time() + timeout gather_syndic_wait = time.time() + self.opts['syndic_wait'] # are there still minions running the job out there # start as True so that we ping at least once minions_running = True log.debug( 'get_iter_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) while True: # Process events until timeout is reached or all minions have returned for raw in ret_iter: # if we got None, then there were no events if raw is None: break if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) if 'missing' in raw.get('data', {}): missing.update(raw['data']['missing']) continue if 'return' not in raw['data']: continue if kwargs.get('raw', False): found.add(raw['data']['id']) yield raw else: found.add(raw['data']['id']) ret = {raw['data']['id']: {'ret': raw['data']['return']}} if 'out' in raw['data']: ret[raw['data']['id']]['out'] = raw['data']['out'] if 'retcode' in raw['data']: ret[raw['data']['id']]['retcode'] = raw['data']['retcode'] if 'jid' in raw['data']: ret[raw['data']['id']]['jid'] = raw['data']['jid'] if kwargs.get('_cmd_meta', False): ret[raw['data']['id']].update(raw['data']) log.debug('jid %s return from %s', jid, raw['data']['id']) yield ret # if we have all of the returns (and we aren't a syndic), no need for anything fancy if 
len(found.intersection(minions)) >= len(minions) and not self.opts['order_masters']: # All minions have returned, break out of the loop log.debug('jid %s found all minions %s', jid, found) break elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']: if len(found) >= len(minions) and len(minions) > 0 and time.time() > gather_syndic_wait: # There were some minions to find and we found them # However, this does not imply that *all* masters have yet responded with expected minion lists. # Therefore, continue to wait up to the syndic_wait period (calculated in gather_syndic_wait) to see # if additional lower-level masters deliver their lists of expected # minions. break # If we get here we may not have gathered the minion list yet. Keep waiting # for all lower-level masters to respond with their minion lists # let start the timeouts for all remaining minions for id_ in minions - found: # if we have a new minion in the list, make sure it has a timeout if id_ not in minion_timeouts: minion_timeouts[id_] = time.time() + timeout # if the jinfo has timed out and some minions are still running the job # re-do the ping if time.time() > timeout_at and minions_running: # since this is a new ping, no one has responded yet jinfo = self.gather_job_info(jid, list(minions - found), 'list', **kwargs) minions_running = False # if we weren't assigned any jid that means the master thinks # we have nothing to send if 'jid' not in jinfo: jinfo_iter = [] else: jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid'])) timeout_at = time.time() + gather_job_timeout # if you are a syndic, wait a little longer if self.opts['order_masters']: timeout_at += self.opts.get('syndic_wait', 1) # check for minions that are running the job still for raw in jinfo_iter: # if there are no more events, lets stop waiting for the jinfo if raw is None: break try: if raw['data']['retcode'] > 0: log.error('saltutil returning errors on minion %s', raw['data']['id']) 
minions.remove(raw['data']['id']) break except KeyError as exc: # This is a safe pass. We're just using the try/except to # avoid having to deep-check for keys. missing_key = exc.__str__().strip('\'"') if missing_key == 'retcode': log.debug('retcode missing from client return') else: log.debug( 'Passing on saltutil error. Key \'%s\' missing ' 'from client return. This may be an error in ' 'the client.', missing_key ) # Keep track of the jid events to unsubscribe from later open_jids.add(jinfo['jid']) # TODO: move to a library?? if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue if 'syndic' in raw.get('data', {}): minions.update(raw['syndic']) continue if 'return' not in raw.get('data', {}): continue # if the job isn't running there anymore... don't count if raw['data']['return'] == {}: continue # if the minion throws an exception containing the word "return" # the master will try to handle the string as a dict in the next # step. Check if we have a string, log the issue and continue. 
if isinstance(raw['data']['return'], six.string_types): log.error("unexpected return from minion: %s", raw) continue if 'return' in raw['data']['return'] and \ raw['data']['return']['return'] == {}: continue # if we didn't originally target the minion, lets add it to the list if raw['data']['id'] not in minions: minions.add(raw['data']['id']) # update this minion's timeout, as long as the job is still running minion_timeouts[raw['data']['id']] = time.time() + timeout # a minion returned, so we know its running somewhere minions_running = True # if we have hit gather_job_timeout (after firing the job) AND # if we have hit all minion timeouts, lets call it now = time.time() # if we have finished waiting, and no minions are running the job # then we need to see if each minion has timedout done = (now > timeout_at) and not minions_running if done: # if all minions have timeod out for id_ in minions - found: if now < minion_timeouts[id_]: done = False break if done: break # don't spin if block: time.sleep(0.01) else: yield # If there are any remaining open events, clean them up. if open_jids: for jid in open_jids: self.event.unsubscribe(jid) if expect_minions: for minion in list((minions - found)): yield {minion: {'failed': True}} # Filter out any minions marked as missing for which we received # returns (prevents false events sent due to higher-level masters not # knowing about lower-level minions). 
missing -= found # Report on missing minions if missing: for minion in missing: yield {minion: {'failed': True}} def get_returns( self, jid, minions, timeout=None): ''' Get the returns for the command line interface via the event system ''' minions = set(minions) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout log.debug( 'get_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Master job cache returner [{0}] failed to verify jid. ' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: time_left = timeout_at - int(time.time()) wait = max(1, time_left) raw = self.event.get_event(wait, jid, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: found.add(raw['id']) ret[raw['id']] = raw['return'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break if int(time.time()) > timeout_at: log.info( 'jid %s minions %s did not return in time', jid, (minions - found) ) break time.sleep(0.01) return ret def get_full_returns(self, jid, minions, timeout=None): ''' This method starts off a watcher looking at the return data for a specified jid, it returns all of the information for the jid ''' # TODO: change this from ret to return... or the other way. 
# Its inconsistent, we should pick one ret = {} # create the iterator-- since we want to get anyone in the middle event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Returner {0} could not fetch jid data. ' 'Exception details: {1}'.format( self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # if we have all the minion returns, lets just return if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise lets use the listener we created above to get the rest for event_ret in event_iter: # if nothing in the event_ret, skip if event_ret == {}: time.sleep(0.02) continue for minion, m_data in six.iteritems(event_ret): if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # are we done yet? if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise we hit the timeout, return what we have return ret def get_cache_returns(self, jid): ''' Execute a single pass to gather the contents of the job cache ''' ret = {} try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Could not examine master job cache. ' 'Error occurred in {0} returner. 
' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data return ret def get_cli_static_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_timeout=False, show_jid=False): ''' Get the returns for the command line interface via the event system ''' log.trace('entered - function get_cli_static_event_returns()') minions = set(minions) if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Load could not be retrieved from ' 'returner {0}. 
Exception details: {1}'.format( self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: # Process events until timeout is reached or all minions have returned time_left = timeout_at - int(time.time()) # Wait 0 == forever, use a minimum of 1s wait = max(1, time_left) jid_tag = 'salt/job/{0}'.format(jid) raw = self.event.get_event(wait, jid_tag, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue found.add(raw['id']) ret[raw['id']] = {'ret': raw['return']} ret[raw['id']]['success'] = raw.get('success', False) if 'out' in raw: ret[raw['id']]['out'] = raw['out'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break if int(time.time()) > timeout_at: if verbose or show_timeout: if self.opts.get('minion_data_cache', False) \ or tgt_type in ('glob', 'pcre', 'list'): if len(found) < len(minions): fail = sorted(list(minions.difference(found))) for minion in fail: ret[minion] = { 'out': 'no_return', 'ret': 'Minion did not return' } break time.sleep(0.01) self._clean_up_subscriptions(jid) return ret def get_cli_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, progress=False, show_timeout=False, show_jid=False, **kwargs): ''' Get the returns for the command line interface via the event system ''' log.trace('func get_cli_event_returns()') if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) # lazy load the connected minions connected_minions = None return_count = 0 for ret in self.get_iter_returns(jid, minions, timeout=timeout, tgt=tgt, tgt_type=tgt_type, # (gtmanfred) 
expect_minions is popped here incase it is passed from a client # call. If this is not popped, then it would be passed twice to # get_iter_returns. expect_minions=(kwargs.pop('expect_minions', False) or verbose or show_timeout), **kwargs ): log.debug('return event: %s', ret) return_count = return_count + 1 if progress: for id_, min_ret in six.iteritems(ret): if not min_ret.get('failed') is True: yield {'minion_count': len(minions), 'return_count': return_count} # replace the return structure for missing minions for id_, min_ret in six.iteritems(ret): if min_ret.get('failed') is True: if connected_minions is None: connected_minions = salt.utils.minions.CkMinions(self.opts).connected_ids() if self.opts['minion_data_cache'] \ and salt.cache.factory(self.opts).contains('minions/{0}'.format(id_), 'data') \ and connected_minions \ and id_ not in connected_minions: yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [Not connected]', 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: # don't report syndics as unresponsive minions if not os.path.exists(os.path.join(self.opts['syndic_dir'], id_)): yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [No response]' '\nThe minions may not have all finished running and any ' 'remaining minions will return upon completion. To look ' 'up the return data for this job later, run the following ' 'command:\n\n' 'salt-run jobs.lookup_jid {0}'.format(jid), 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: yield {id_: min_ret} self._clean_up_subscriptions(jid) def get_event_iter_returns(self, jid, minions, timeout=None): ''' Gather the return data from the event system, break hard when timeout is reached. 
''' log.trace('entered - function get_event_iter_returns()') if timeout is None: timeout = self.opts['timeout'] timeout_at = time.time() + timeout found = set() # Check to see if the jid is real, if not return the empty dict if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() # Wait for the hosts to check in while True: raw = self.event.get_event(timeout, auto_reconnect=self.auto_reconnect) if raw is None or time.time() > timeout_at: # Timeout reached break if 'minions' in raw.get('data', {}): continue try: found.add(raw['id']) ret = {raw['id']: {'ret': raw['return']}} except KeyError: # Ignore other erroneous messages continue if 'out' in raw: ret[raw['id']]['out'] = raw['out'] yield ret time.sleep(0.02) def _prep_pub(self, tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs): ''' Set up the payload_kwargs to be sent down to the master ''' if tgt_type == 'nodegroup': if tgt not in self.opts['nodegroups']: conf_file = self.opts.get( 'conf_file', 'the master config file' ) raise SaltInvocationError( 'Node group {0} unavailable in {1}'.format( tgt, conf_file ) ) tgt = salt.utils.minions.nodegroup_comp(tgt, self.opts['nodegroups']) tgt_type = 'compound' # Convert a range expression to a list of nodes and change expression # form to list if tgt_type == 'range' and HAS_RANGE: tgt = self._convert_range_to_list(tgt) tgt_type = 'list' # If an external job cache is specified add it to the ret list if self.opts.get('ext_job_cache'): if ret: ret += ',{0}'.format(self.opts['ext_job_cache']) else: ret = self.opts['ext_job_cache'] # format the payload - make a function that does this in the payload # module # Generate the standard keyword args to feed to format_payload payload_kwargs = {'cmd': 'publish', 'tgt': tgt, 'fun': fun, 'arg': arg, 'key': self.key, 'tgt_type': tgt_type, 'ret': ret, 'jid': jid} # if kwargs are passed, pack them. 
if kwargs: payload_kwargs['kwargs'] = kwargs # If we have a salt user, add it to the payload if self.opts['syndic_master'] and 'user' in kwargs: payload_kwargs['user'] = kwargs['user'] elif self.salt_user: payload_kwargs['user'] = self.salt_user # If we're a syndication master, pass the timeout if self.opts['order_masters']: payload_kwargs['to'] = timeout return payload_kwargs def pub(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, listen=False, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.ReqChannel.factory(self.opts, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. 
# If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError as err: log.error(err) raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: return payload self.key = key payload_kwargs['key'] = self.key payload = channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: return payload # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() return {'jid': payload['load']['jid'], 'minions': payload['load']['minions']} @tornado.gen.coroutine def pub_async(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, io_loop=None, listen=True, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. 
fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.AsyncReqChannel.factory(self.opts, io_loop=io_loop, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. # If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = yield channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError: raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! 
Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: raise tornado.gen.Return(payload) self.key = key payload_kwargs['key'] = self.key payload = yield channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: raise tornado.gen.Return(payload) # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() raise tornado.gen.Return({'jid': payload['load']['jid'], 'minions': payload['load']['minions']}) def __del__(self): # This IS really necessary! # When running tests, if self.events is not destroyed, we leak 2 # threads per test case which uses self.client if hasattr(self, 'event'): # The call below will take care of calling 'self.event.destroy()' del self.event def _clean_up_subscriptions(self, job_id): if self.opts.get('order_masters'): self.event.unsubscribe('syndic/.*/{0}'.format(job_id), 'regex') self.event.unsubscribe('salt/job/{0}'.format(job_id)) class FunctionWrapper(dict): ''' Create a function wrapper that looks like the functions dict on the minion but invoked commands on the minion via a LocalClient. This allows SLS files to be loaded with an object that calls down to the minion when the salt functions dict is referenced. 
''' def __init__(self, opts, minion): super(FunctionWrapper, self).__init__() self.opts = opts self.minion = minion self.local = LocalClient(self.opts['conf_file']) self.functions = self.__load_functions() def __missing__(self, key): ''' Since the function key is missing, wrap this call to a command to the minion of said key if it is available in the self.functions set ''' if key not in self.functions: raise KeyError return self.run_key(key) def __load_functions(self): ''' Find out what functions are available on the minion ''' return set(self.local.cmd(self.minion, 'sys.list_functions').get(self.minion, [])) def run_key(self, key): ''' Return a function that executes the arguments passed via the local client ''' def func(*args, **kwargs): ''' Run a remote call ''' args = list(args) for _key, _val in kwargs: args.append('{0}={1}'.format(_key, _val)) return self.local.cmd(self.minion, key, args) return func class Caller(object): ''' ``Caller`` is the same interface used by the :command:`salt-call` command-line tool on the Salt Minion. .. versionchanged:: 2015.8.0 Added the ``cmd`` method for consistency with the other Salt clients. The existing ``function`` and ``sminion.functions`` interfaces still exist but have been removed from the docs. Importing and using ``Caller`` must be done on the same machine as a Salt Minion and it must be done using the same user that the Salt Minion is running as. Usage: .. code-block:: python import salt.client caller = salt.client.Caller() caller.cmd('test.ping') Note, a running master or minion daemon is not required to use this class. Running ``salt-call --local`` simply sets :conf_minion:`file_client` to ``'local'``. The same can be achieved at the Python level by including that setting in a minion config file. .. versionadded:: 2014.7.0 Pass the minion config as the ``mopts`` dictionary. .. 
code-block:: python import salt.client import salt.config __opts__ = salt.config.minion_config('/etc/salt/minion') __opts__['file_client'] = 'local' caller = salt.client.Caller(mopts=__opts__) ''' def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, 'minion'), mopts=None): # Late-import of the minion module to keep the CLI as light as possible import salt.minion if mopts: self.opts = mopts else: self.opts = salt.config.minion_config(c_path) self.sminion = salt.minion.SMinion(self.opts) def cmd(self, fun, *args, **kwargs): ''' Call an execution module with the given arguments and keyword arguments .. versionchanged:: 2015.8.0 Added the ``cmd`` method for consistency with the other Salt clients. The existing ``function`` and ``sminion.functions`` interfaces still exist but have been removed from the docs. .. code-block:: python caller.cmd('test.arg', 'Foo', 'Bar', baz='Baz') caller.cmd('event.send', 'myco/myevent/something', data={'foo': 'Foo'}, with_env=['GIT_COMMIT'], with_grains=True) ''' return self.sminion.functions[fun](*args, **kwargs) def function(self, fun, *args, **kwargs): ''' Call a single salt function ''' func = self.sminion.functions[fun] args, kwargs = salt.minion.load_args_and_kwargs( func, salt.utils.args.parse_input(args, kwargs=kwargs),) return func(*args, **kwargs) class ProxyCaller(object): ''' ``ProxyCaller`` is the same interface used by the :command:`salt-call` with the args ``--proxyid <proxyid>`` command-line tool on the Salt Proxy Minion. Importing and using ``ProxyCaller`` must be done on the same machine as a Salt Minion and it must be done using the same user that the Salt Minion is running as. Usage: .. code-block:: python import salt.client caller = salt.client.Caller() caller.cmd('test.ping') Note, a running master or minion daemon is not required to use this class. Running ``salt-call --local`` simply sets :conf_minion:`file_client` to ``'local'``. 
The same can be achieved at the Python level by including that setting in a minion config file. .. code-block:: python import salt.client import salt.config __opts__ = salt.config.proxy_config('/etc/salt/proxy', minion_id='quirky_edison') __opts__['file_client'] = 'local' caller = salt.client.ProxyCaller(mopts=__opts__) .. note:: To use this for calling proxies, the :py:func:`is_proxy functions <salt.utils.platform.is_proxy>` requires that ``--proxyid`` be an argument on the commandline for the script this is used in, or that the string ``proxy`` is in the name of the script. ''' def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, 'proxy'), mopts=None): # Late-import of the minion module to keep the CLI as light as possible import salt.minion self.opts = mopts or salt.config.proxy_config(c_path) self.sminion = salt.minion.SProxyMinion(self.opts) def cmd(self, fun, *args, **kwargs): ''' Call an execution module with the given arguments and keyword arguments .. code-block:: python caller.cmd('test.arg', 'Foo', 'Bar', baz='Baz') caller.cmd('event.send', 'myco/myevent/something', data={'foo': 'Foo'}, with_env=['GIT_COMMIT'], with_grains=True) ''' func = self.sminion.functions[fun] data = { 'arg': args, 'fun': fun } data.update(kwargs) executors = getattr(self.sminion, 'module_executors', []) or \ self.opts.get('module_executors', ['direct_call']) if isinstance(executors, six.string_types): executors = [executors] for name in executors: fname = '{0}.execute'.format(name) if fname not in self.sminion.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = self.sminion.executors[fname](self.opts, data, func, args, kwargs) if return_data is not None: break return return_data
saltstack/salt
salt/client/__init__.py
LocalClient.__read_master_key
python
def __read_master_key(self):
    '''
    Read in the rotating master authentication key.

    The key file lives under the master's ``cachedir`` and is named after
    the user running the client (``.<user>_key``).  Returns the key text
    as unicode, or an empty string when the file cannot be read, which
    signals callers to fall back to external auth (eauth).
    '''
    # Work out which user's key file applies to this invocation.
    key_user = self.salt_user
    config_user = self.opts.get('user', 'root')
    if key_user == 'root' and config_user != 'root':
        key_user = config_user
    if key_user.startswith('sudo_'):
        key_user = config_user
    if salt.utils.platform.is_windows():
        # A Windows 'DOMAIN\username' is not a usable filename
        # component, so flatten the backslash.
        key_user = key_user.replace('\\', '_')
    keyfile = os.path.join(
        self.opts['cachedir'], '.{0}_key'.format(key_user))
    try:
        # Verify every parent directory of the key file is readable
        # before attempting to open it.
        salt.utils.verify.check_path_traversal(
            self.opts['cachedir'], key_user, self.skip_perm_errors)
        with salt.utils.files.fopen(keyfile, 'r') as key:
            return salt.utils.stringutils.to_unicode(key.read())
    except (OSError, IOError, SaltClientError):
        # Fall back to eauth
        return ''
Read in the rotating master authentication key
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L175-L200
[ "def fopen(*args, **kwargs):\n '''\n Wrapper around open() built-in to set CLOEXEC on the fd.\n\n This flag specifies that the file descriptor should be closed when an exec\n function is invoked;\n\n When a file descriptor is allocated (as with open or dup), this bit is\n initially cleared on the new file descriptor, meaning that descriptor will\n survive into the new program after exec.\n\n NB! We still have small race condition between open and fcntl.\n '''\n if six.PY3:\n try:\n # Don't permit stdin/stdout/stderr to be opened. The boolean False\n # and True are treated by Python 3's open() as file descriptors 0\n # and 1, respectively.\n if args[0] in (0, 1, 2):\n raise TypeError(\n '{0} is not a permitted file descriptor'.format(args[0])\n )\n except IndexError:\n pass\n binary = None\n # ensure 'binary' mode is always used on Windows in Python 2\n if ((six.PY2 and salt.utils.platform.is_windows() and 'binary' not in kwargs) or\n kwargs.pop('binary', False)):\n if len(args) > 1:\n args = list(args)\n if 'b' not in args[1]:\n args[1] = args[1].replace('t', 'b')\n if 'b' not in args[1]:\n args[1] += 'b'\n elif kwargs.get('mode'):\n if 'b' not in kwargs['mode']:\n kwargs['mode'] = kwargs['mode'].replace('t', 'b')\n if 'b' not in kwargs['mode']:\n kwargs['mode'] += 'b'\n else:\n # the default is to read\n kwargs['mode'] = 'rb'\n elif six.PY3 and 'encoding' not in kwargs:\n # In Python 3, if text mode is used and the encoding\n # is not specified, set the encoding to 'utf-8'.\n binary = False\n if len(args) > 1:\n args = list(args)\n if 'b' in args[1]:\n binary = True\n if kwargs.get('mode', None):\n if 'b' in kwargs['mode']:\n binary = True\n if not binary:\n kwargs['encoding'] = __salt_system_encoding__\n\n if six.PY3 and not binary and not kwargs.get('newline', None):\n kwargs['newline'] = ''\n\n f_handle = open(*args, **kwargs) # pylint: disable=resource-leakage\n\n if is_fcntl_available():\n # modify the file descriptor on systems with fcntl\n # unix and 
unix-like systems only\n try:\n FD_CLOEXEC = fcntl.FD_CLOEXEC # pylint: disable=C0103\n except AttributeError:\n FD_CLOEXEC = 1 # pylint: disable=C0103\n old_flags = fcntl.fcntl(f_handle.fileno(), fcntl.F_GETFD)\n fcntl.fcntl(f_handle.fileno(), fcntl.F_SETFD, old_flags | FD_CLOEXEC)\n\n return f_handle\n", "def to_unicode(s, encoding=None, errors='strict', normalize=False):\n '''\n Given str or unicode, return unicode (str for python 3)\n '''\n def _normalize(s):\n return unicodedata.normalize('NFC', s) if normalize else s\n\n if encoding is None:\n # Try utf-8 first, and fall back to detected encoding\n encoding = ('utf-8', __salt_system_encoding__)\n if not isinstance(encoding, (tuple, list)):\n encoding = (encoding,)\n\n if not encoding:\n raise ValueError('encoding cannot be empty')\n\n exc = None\n if six.PY3:\n if isinstance(s, str):\n return _normalize(s)\n elif isinstance(s, (bytes, bytearray)):\n return _normalize(to_str(s, encoding, errors))\n raise TypeError('expected str, bytes, or bytearray')\n else:\n # This needs to be str and not six.string_types, since if the string is\n # already a unicode type, it does not need to be decoded (and doing so\n # will raise an exception).\n if isinstance(s, unicode): # pylint: disable=incompatible-py3-code,undefined-variable\n return _normalize(s)\n elif isinstance(s, (str, bytearray)):\n for enc in encoding:\n try:\n return _normalize(s.decode(enc, errors))\n except UnicodeDecodeError as err:\n exc = err\n continue\n # The only way we get this far is if a UnicodeDecodeError was\n # raised, otherwise we would have already returned (or raised some\n # other exception).\n raise exc # pylint: disable=raising-bad-type\n raise TypeError('expected str or bytearray')\n", "def check_path_traversal(path, user='root', skip_perm_errors=False):\n '''\n Walk from the root up to a directory and verify that the current\n user has access to read each directory. 
This is used for making\n sure a user can read all parent directories of the minion's key\n before trying to go and generate a new key and raising an IOError\n '''\n for tpath in list_path_traversal(path):\n if not os.access(tpath, os.R_OK):\n msg = 'Could not access {0}.'.format(tpath)\n if not os.path.exists(tpath):\n msg += ' Path does not exist.'\n else:\n current_user = salt.utils.user.get_user()\n # Make the error message more intelligent based on how\n # the user invokes salt-call or whatever other script.\n if user != current_user:\n msg += ' Try running as user {0}.'.format(user)\n else:\n msg += ' Please give {0} read permissions.'.format(user)\n\n # We don't need to bail on config file permission errors\n # if the CLI\n # process is run with the -a flag\n if skip_perm_errors:\n return\n # Propagate this exception up so there isn't a sys.exit()\n # in the middle of code that could be imported elsewhere.\n raise SaltClientError(msg)\n" ]
class LocalClient(object): ''' The interface used by the :command:`salt` CLI tool on the Salt Master ``LocalClient`` is used to send a command to Salt minions to execute :ref:`execution modules <all-salt.modules>` and return the results to the Salt Master. Importing and using ``LocalClient`` must be done on the same machine as the Salt Master and it must be done using the same user that the Salt Master is running as. (Unless :conf_master:`external_auth` is configured and authentication credentials are included in the execution). .. note:: The LocalClient uses a Tornado IOLoop, this can create issues when using the LocalClient inside an existing IOLoop. If creating the LocalClient in partnership with another IOLoop either create the IOLoop before creating the LocalClient, or when creating the IOLoop use ioloop.current() which will return the ioloop created by LocalClient. .. code-block:: python import salt.client local = salt.client.LocalClient() local.cmd('*', 'test.fib', [10]) ''' def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, 'master'), mopts=None, skip_perm_errors=False, io_loop=None, keep_loop=False, auto_reconnect=False): ''' :param IOLoop io_loop: io_loop used for events. Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. 
''' if mopts: self.opts = mopts else: if os.path.isdir(c_path): log.warning( '%s expects a file path not a directory path(%s) to ' 'its \'c_path\' keyword argument', self.__class__.__name__, c_path ) self.opts = salt.config.client_config(c_path) self.serial = salt.payload.Serial(self.opts) self.salt_user = salt.utils.user.get_specific_user() self.skip_perm_errors = skip_perm_errors self.key = self.__read_master_key() self.auto_reconnect = auto_reconnect self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False, io_loop=io_loop, keep_loop=keep_loop) self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils) self.returners = salt.loader.returners(self.opts, self.functions) def _convert_range_to_list(self, tgt): ''' convert a seco.range range into a list target ''' range_ = seco.range.Range(self.opts['range_server']) try: return range_.expand(tgt) except seco.range.RangeException as err: print('Range server exception: {0}'.format(err)) return [] def _get_timeout(self, timeout): ''' Return the timeout to use ''' if timeout is None: return self.opts['timeout'] if isinstance(timeout, int): return timeout if isinstance(timeout, six.string_types): try: return int(timeout) except ValueError: return self.opts['timeout'] # Looks like the timeout is invalid, use config return self.opts['timeout'] def gather_job_info(self, jid, tgt, tgt_type, listen=True, **kwargs): ''' Return the information about a given job ''' log.debug('Checking whether jid %s is still running', jid) timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) pub_data = self.run_job(tgt, 'saltutil.find_job', arg=[jid], tgt_type=tgt_type, timeout=timeout, listen=listen, **kwargs ) if 'jid' in pub_data: self.event.subscribe(pub_data['jid']) return pub_data def _check_pub_data(self, pub_data, listen=True): ''' Common checks on the pub_data data structure returned from 
running pub ''' if pub_data == '': # Failed to authenticate, this could be a bunch of things raise EauthAuthenticationError( 'Failed to authenticate! This is most likely because this ' 'user is not permitted to execute commands, but there is a ' 'small possibility that a disk error occurred (check ' 'disk/inode usage).' ) # Failed to connect to the master and send the pub if 'error' in pub_data: print(pub_data['error']) log.debug('_check_pub_data() error: %s', pub_data['error']) return {} elif 'jid' not in pub_data: return {} if pub_data['jid'] == '0': print('Failed to connect to the Master, ' 'is the Salt Master running?') return {} # If we order masters (via a syndic), don't short circuit if no minions # are found if not self.opts.get('order_masters'): # Check for no minions if not pub_data['minions']: print('No minions matched the target. ' 'No command was sent, no jid was assigned.') return {} # don't install event subscription listeners when the request is asynchronous # and doesn't care. this is important as it will create event leaks otherwise if not listen: return pub_data if self.opts.get('order_masters'): self.event.subscribe('syndic/.*/{0}'.format(pub_data['jid']), 'regex') self.event.subscribe('salt/job/{0}'.format(pub_data['jid'])) return pub_data def run_job( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=False, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. 
code-block:: python >>> local.run_job('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = self.pub( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' ) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) return self._check_pub_data(pub_data, listen=listen) def gather_minions(self, tgt, expr_form): _res = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form) return _res['minions'] @tornado.gen.coroutine def run_job_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=True, io_loop=None, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job_async('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = yield self.pub_async( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), io_loop=io_loop, listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) raise tornado.gen.Return(self._check_pub_data(pub_data, listen=listen)) def cmd_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', kwarg=None, **kwargs): ''' Asynchronously send a command to connected minions The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A job ID or 0 on failure. .. code-block:: python >>> local.cmd_async('*', 'test.sleep', [300]) '20131219215921857715' ''' pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, jid=jid, kwarg=kwarg, listen=False, **kwargs) try: return pub_data['jid'] except KeyError: return 0 def cmd_subset( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, sub=3, cli=False, progress=False, full_return=False, **kwargs): ''' Execute a command on a random subset of the targeted systems The function signature is the same as :py:meth:`cmd` with the following exceptions. :param sub: The number of systems to execute on :param cli: When this is set to True, a generator is returned, otherwise a dictionary of the minion returns is returned .. 
code-block:: python >>> SLC.cmd_subset('*', 'test.ping', sub=1) {'jerry': True} ''' minion_ret = self.cmd(tgt, 'sys.list_functions', tgt_type=tgt_type, **kwargs) minions = list(minion_ret) random.shuffle(minions) f_tgt = [] for minion in minions: if fun in minion_ret[minion]: f_tgt.append(minion) if len(f_tgt) >= sub: break func = self.cmd if cli: func = self.cmd_cli return func( f_tgt, fun, arg, tgt_type='list', ret=ret, kwarg=kwarg, progress=progress, full_return=full_return, **kwargs) def cmd_batch( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, batch='10%', **kwargs): ''' Iteratively execute a command on subsets of minions at a time The function signature is the same as :py:meth:`cmd` with the following exceptions. :param batch: The batch identifier of systems to execute on :returns: A generator of minion returns .. code-block:: python >>> returns = local.cmd_batch('*', 'state.highstate', batch='10%') >>> for ret in returns: ... print(ret) {'jerry': {...}} {'dave': {...}} {'stewart': {...}} ''' # Late import - not used anywhere else in this file import salt.cli.batch opts = salt.cli.batch.batch_get_opts( tgt, fun, batch, self.opts, arg=arg, tgt_type=tgt_type, ret=ret, kwarg=kwarg, **kwargs) eauth = salt.cli.batch.batch_get_eauth(kwargs) arg = salt.utils.args.parse_input(arg, kwargs=kwarg) opts = {'tgt': tgt, 'fun': fun, 'arg': arg, 'tgt_type': tgt_type, 'ret': ret, 'batch': batch, 'failhard': kwargs.get('failhard', False), 'raw': kwargs.get('raw', False)} if 'timeout' in kwargs: opts['timeout'] = kwargs['timeout'] if 'gather_job_timeout' in kwargs: opts['gather_job_timeout'] = kwargs['gather_job_timeout'] if 'batch_wait' in kwargs: opts['batch_wait'] = int(kwargs['batch_wait']) eauth = {} if 'eauth' in kwargs: eauth['eauth'] = kwargs.pop('eauth') if 'username' in kwargs: eauth['username'] = kwargs.pop('username') if 'password' in kwargs: eauth['password'] = kwargs.pop('password') if 'token' in kwargs: eauth['token'] = kwargs.pop('token') for key, 
val in six.iteritems(self.opts): if key not in opts: opts[key] = val batch = salt.cli.batch.Batch(opts, eauth=eauth, quiet=True) for ret in batch.run(): yield ret def cmd(self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', jid='', full_return=False, kwarg=None, **kwargs): ''' Synchronously execute a command on targeted minions The cmd method will execute and wait for the timeout period for all minions to reply, then it will return all minion data at once. .. code-block:: python >>> import salt.client >>> local = salt.client.LocalClient() >>> local.cmd('*', 'cmd.run', ['whoami']) {'jerry': 'root'} With extra keyword arguments for the command function to be run: .. code-block:: python local.cmd('*', 'test.arg', ['arg1', 'arg2'], kwarg={'foo': 'bar'}) Compound commands can be used for multiple executions in a single publish. Function names and function arguments are provided in separate lists but the index values must correlate and an empty list must be used if no arguments are required. .. code-block:: python >>> local.cmd('*', [ 'grains.items', 'sys.doc', 'cmd.run', ], [ [], [], ['uptime'], ]) :param tgt: Which minions to target for the execution. Default is shell glob. Modified by the ``tgt_type`` option. :type tgt: string or list :param fun: The module and function to call on the specified minions of the form ``module.function``. For example ``test.ping`` or ``grains.items``. Compound commands Multiple functions may be called in a single publish by passing a list of commands. This can dramatically lower overhead and speed up the application communicating with Salt. This requires that the ``arg`` param is a list of lists. The ``fun`` list and the ``arg`` list must correlate by index meaning a function that does not take arguments must still have a corresponding empty list at the expected index. :type fun: string or list of strings :param arg: A list of arguments to pass to the remote function. 
If the function takes no arguments ``arg`` may be omitted except when executing a compound command. :type arg: list or list-of-lists :param timeout: Seconds to wait after the last minion returns but before all minions return. :param tgt_type: The type of ``tgt``. Allowed values: * ``glob`` - Bash glob completion - Default * ``pcre`` - Perl style regular expression * ``list`` - Python list of hosts * ``grain`` - Match based on a grain comparison * ``grain_pcre`` - Grain comparison with a regex * ``pillar`` - Pillar data comparison * ``pillar_pcre`` - Pillar data comparison with a regex * ``nodegroup`` - Match on nodegroup * ``range`` - Use a Range server for matching * ``compound`` - Pass a compound match string * ``ipcidr`` - Match based on Subnet (CIDR notation) or IPv4 address. .. versionchanged:: 2017.7.0 Renamed from ``expr_form`` to ``tgt_type`` :param ret: The returner to use. The value passed can be single returner, or a comma delimited list of returners to call in order on the minions :param kwarg: A dictionary with keyword arguments for the function. :param full_return: Output the job return only (default) or the full return including exit code and other job metadata. :param kwargs: Optional keyword arguments. Authentication credentials may be passed when using :conf_master:`external_auth`. For example: ``local.cmd('*', 'test.ping', username='saltdev', password='saltdev', eauth='pam')``. Or: ``local.cmd('*', 'test.ping', token='5871821ea51754fdcea8153c1c745433')`` :returns: A dictionary with the result of the execution, keyed by minion ID. A compound command will return a sub-dictionary keyed by function name. 
''' was_listening = self.event.cpub try: pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, timeout, jid, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data ret = {} for fn_ret in self.get_cli_event_returns( pub_data['jid'], pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, **kwargs): if fn_ret: for mid, data in six.iteritems(fn_ret): ret[mid] = (data if full_return else data.get('ret', {})) for failed in list(set(pub_data['minions']) - set(ret)): ret[failed] = False return ret finally: if not was_listening: self.event.close_pub() def cmd_cli( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, progress=False, **kwargs): ''' Used by the :command:`salt` CLI. This method returns minion returns as they come back and attempts to block until all minions return. The function signature is the same as :py:meth:`cmd` with the following exceptions. :param verbose: Print extra information about the running command :returns: A generator ''' was_listening = self.event.cpub if fun.startswith('state.'): ref = {'compound': '-C', 'glob': '', 'grain': '-G', 'grain_pcre': '-P', 'ipcidr': '-S', 'list': '-L', 'nodegroup': '-N', 'pcre': '-E', 'pillar': '-I', 'pillar_pcre': '-J'} if HAS_RANGE: ref['range'] = '-R' if ref[tgt_type].startswith('-'): self.target_data = "{0} '{1}'".format( ref[tgt_type], ','.join(tgt) if isinstance(tgt, list) else tgt) else: self.target_data = ','.join(tgt) if isinstance(tgt, list) else tgt else: self.target_data = '' try: self.pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not self.pub_data: yield self.pub_data else: try: for fn_ret in self.get_cli_event_returns( self.pub_data['jid'], self.pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, verbose, progress, **kwargs): if not fn_ret: continue yield fn_ret except KeyboardInterrupt: exit_msg = ( '\nExiting gracefully on Ctrl-c' '\n' 'This job\'s jid is: {0}\n' 'The 
minions may not have all finished running and any ' 'remaining minions will return upon completion.\n\n' 'To look up the return data for this job later, run the ' 'following command:\n' 'salt-run jobs.lookup_jid {0}'.format(self.pub_data['jid'])) if self.target_data: exit_msg += ( '\n\n' 'To set up the state run to safely exit, run the following command:\n' 'salt {0} state.soft_kill {1}'.format(self.target_data, self.pub_data['jid'])) raise SystemExit(exit_msg) finally: if not was_listening: self.event.close_pub() def cmd_iter( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, **kwargs): ''' Yields the individual minion returns as they come in The function signature is the same as :py:meth:`cmd` with the following exceptions. Normally :py:meth:`cmd_iter` does not yield results for minions that are not connected. If you want it to return results for disconnected minions set `expect_minions=True` in `kwargs`. :return: A generator yielding the individual minion returns .. code-block:: python >>> ret = local.cmd_iter('*', 'test.ping') >>> for i in ret: ... print(i) {'jerry': {'ret': True}} {'dave': {'ret': True}} {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: if kwargs.get('yield_pub_data'): yield pub_data for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=self._get_timeout(timeout), tgt=tgt, tgt_type=tgt_type, **kwargs): if not fn_ret: continue yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_iter_no_block( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, show_jid=False, verbose=False, **kwargs): ''' Yields the individual minion returns as they come in, or None when no returns are available. 
The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A generator yielding the individual minion returns, or None when no returns are available. This allows for actions to be injected in between minion returns. .. code-block:: python >>> ret = local.cmd_iter_no_block('*', 'test.ping') >>> for i in ret: ... print(i) None {'jerry': {'ret': True}} {'dave': {'ret': True}} None {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=timeout, tgt=tgt, tgt_type=tgt_type, block=False, **kwargs): if fn_ret and any([show_jid, verbose]): for minion in fn_ret: fn_ret[minion]['jid'] = pub_data['jid'] yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_full_return( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, **kwargs): ''' Execute a salt command and return ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data return (self.get_cli_static_event_returns(pub_data['jid'], pub_data['minions'], timeout, tgt, tgt_type, verbose)) finally: if not was_listening: self.event.close_pub() def get_cli_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_jid=False, **kwargs): ''' Starts a watcher looking at the return data for a specified JID :returns: all of the information for the JID ''' if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] fret = {} # make sure the minions is a set (since we do set operations on it) 
minions = set(minions) found = set() # start this before the cache lookup-- in case new stuff comes in event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) # get the info from the cache ret = self.get_cache_returns(jid) if ret != {}: found.update(set(ret)) yield ret # if you have all the returns, stop if len(found.intersection(minions)) >= len(minions): raise StopIteration() # otherwise, get them from the event system for event in event_iter: if event != {}: found.update(set(event)) yield event if len(found.intersection(minions)) >= len(minions): self._clean_up_subscriptions(jid) raise StopIteration() # TODO: tests!! def get_returns_no_block( self, tag, match_type=None): ''' Raw function to just return events of jid excluding timeout logic Yield either the raw event data or None Pass a list of additional regular expressions as `tags_regex` to search the event bus for non-return data, such as minion lists returned from syndics. ''' while True: raw = self.event.get_event(wait=0.01, tag=tag, match_type=match_type, full=True, no_block=True, auto_reconnect=self.auto_reconnect) yield raw def get_iter_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', expect_minions=False, block=True, **kwargs): ''' Watch the event system and return job data as it comes in :returns: all of the information for the JID ''' if not isinstance(minions, set): if isinstance(minions, six.string_types): minions = set([minions]) elif isinstance(minions, (list, tuple)): minions = set(list(minions)) if timeout is None: timeout = self.opts['timeout'] gather_job_timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) start = int(time.time()) # timeouts per minion, id_ -> timeout time minion_timeouts = {} found = set() missing = set() # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the 
iteration, since the jid is invalid raise StopIteration() except Exception as exc: log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG) # Wait for the hosts to check in last_time = False # iterator for this job's return if self.opts['order_masters']: # If we are a MoM, we need to gather expected minions from downstreams masters. ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex') else: ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid)) # iterator for the info of this job jinfo_iter = [] # open event jids that need to be un-subscribed from later open_jids = set() timeout_at = time.time() + timeout gather_syndic_wait = time.time() + self.opts['syndic_wait'] # are there still minions running the job out there # start as True so that we ping at least once minions_running = True log.debug( 'get_iter_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) while True: # Process events until timeout is reached or all minions have returned for raw in ret_iter: # if we got None, then there were no events if raw is None: break if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) if 'missing' in raw.get('data', {}): missing.update(raw['data']['missing']) continue if 'return' not in raw['data']: continue if kwargs.get('raw', False): found.add(raw['data']['id']) yield raw else: found.add(raw['data']['id']) ret = {raw['data']['id']: {'ret': raw['data']['return']}} if 'out' in raw['data']: ret[raw['data']['id']]['out'] = raw['data']['out'] if 'retcode' in raw['data']: ret[raw['data']['id']]['retcode'] = raw['data']['retcode'] if 'jid' in raw['data']: ret[raw['data']['id']]['jid'] = raw['data']['jid'] if kwargs.get('_cmd_meta', False): ret[raw['data']['id']].update(raw['data']) log.debug('jid %s return from %s', jid, raw['data']['id']) yield ret # if we have all of the returns (and we aren't a syndic), no need for anything fancy if 
len(found.intersection(minions)) >= len(minions) and not self.opts['order_masters']: # All minions have returned, break out of the loop log.debug('jid %s found all minions %s', jid, found) break elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']: if len(found) >= len(minions) and len(minions) > 0 and time.time() > gather_syndic_wait: # There were some minions to find and we found them # However, this does not imply that *all* masters have yet responded with expected minion lists. # Therefore, continue to wait up to the syndic_wait period (calculated in gather_syndic_wait) to see # if additional lower-level masters deliver their lists of expected # minions. break # If we get here we may not have gathered the minion list yet. Keep waiting # for all lower-level masters to respond with their minion lists # let start the timeouts for all remaining minions for id_ in minions - found: # if we have a new minion in the list, make sure it has a timeout if id_ not in minion_timeouts: minion_timeouts[id_] = time.time() + timeout # if the jinfo has timed out and some minions are still running the job # re-do the ping if time.time() > timeout_at and minions_running: # since this is a new ping, no one has responded yet jinfo = self.gather_job_info(jid, list(minions - found), 'list', **kwargs) minions_running = False # if we weren't assigned any jid that means the master thinks # we have nothing to send if 'jid' not in jinfo: jinfo_iter = [] else: jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid'])) timeout_at = time.time() + gather_job_timeout # if you are a syndic, wait a little longer if self.opts['order_masters']: timeout_at += self.opts.get('syndic_wait', 1) # check for minions that are running the job still for raw in jinfo_iter: # if there are no more events, lets stop waiting for the jinfo if raw is None: break try: if raw['data']['retcode'] > 0: log.error('saltutil returning errors on minion %s', raw['data']['id']) 
minions.remove(raw['data']['id']) break except KeyError as exc: # This is a safe pass. We're just using the try/except to # avoid having to deep-check for keys. missing_key = exc.__str__().strip('\'"') if missing_key == 'retcode': log.debug('retcode missing from client return') else: log.debug( 'Passing on saltutil error. Key \'%s\' missing ' 'from client return. This may be an error in ' 'the client.', missing_key ) # Keep track of the jid events to unsubscribe from later open_jids.add(jinfo['jid']) # TODO: move to a library?? if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue if 'syndic' in raw.get('data', {}): minions.update(raw['syndic']) continue if 'return' not in raw.get('data', {}): continue # if the job isn't running there anymore... don't count if raw['data']['return'] == {}: continue # if the minion throws an exception containing the word "return" # the master will try to handle the string as a dict in the next # step. Check if we have a string, log the issue and continue. 
if isinstance(raw['data']['return'], six.string_types): log.error("unexpected return from minion: %s", raw) continue if 'return' in raw['data']['return'] and \ raw['data']['return']['return'] == {}: continue # if we didn't originally target the minion, lets add it to the list if raw['data']['id'] not in minions: minions.add(raw['data']['id']) # update this minion's timeout, as long as the job is still running minion_timeouts[raw['data']['id']] = time.time() + timeout # a minion returned, so we know its running somewhere minions_running = True # if we have hit gather_job_timeout (after firing the job) AND # if we have hit all minion timeouts, lets call it now = time.time() # if we have finished waiting, and no minions are running the job # then we need to see if each minion has timedout done = (now > timeout_at) and not minions_running if done: # if all minions have timeod out for id_ in minions - found: if now < minion_timeouts[id_]: done = False break if done: break # don't spin if block: time.sleep(0.01) else: yield # If there are any remaining open events, clean them up. if open_jids: for jid in open_jids: self.event.unsubscribe(jid) if expect_minions: for minion in list((minions - found)): yield {minion: {'failed': True}} # Filter out any minions marked as missing for which we received # returns (prevents false events sent due to higher-level masters not # knowing about lower-level minions). 
missing -= found # Report on missing minions if missing: for minion in missing: yield {minion: {'failed': True}} def get_returns( self, jid, minions, timeout=None): ''' Get the returns for the command line interface via the event system ''' minions = set(minions) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout log.debug( 'get_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Master job cache returner [{0}] failed to verify jid. ' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: time_left = timeout_at - int(time.time()) wait = max(1, time_left) raw = self.event.get_event(wait, jid, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: found.add(raw['id']) ret[raw['id']] = raw['return'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break if int(time.time()) > timeout_at: log.info( 'jid %s minions %s did not return in time', jid, (minions - found) ) break time.sleep(0.01) return ret def get_full_returns(self, jid, minions, timeout=None): ''' This method starts off a watcher looking at the return data for a specified jid, it returns all of the information for the jid ''' # TODO: change this from ret to return... or the other way. 
# Its inconsistent, we should pick one ret = {} # create the iterator-- since we want to get anyone in the middle event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Returner {0} could not fetch jid data. ' 'Exception details: {1}'.format( self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # if we have all the minion returns, lets just return if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise lets use the listener we created above to get the rest for event_ret in event_iter: # if nothing in the event_ret, skip if event_ret == {}: time.sleep(0.02) continue for minion, m_data in six.iteritems(event_ret): if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # are we done yet? if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise we hit the timeout, return what we have return ret def get_cache_returns(self, jid): ''' Execute a single pass to gather the contents of the job cache ''' ret = {} try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Could not examine master job cache. ' 'Error occurred in {0} returner. 
' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data return ret def get_cli_static_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_timeout=False, show_jid=False): ''' Get the returns for the command line interface via the event system ''' log.trace('entered - function get_cli_static_event_returns()') minions = set(minions) if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Load could not be retrieved from ' 'returner {0}. 
Exception details: {1}'.format( self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: # Process events until timeout is reached or all minions have returned time_left = timeout_at - int(time.time()) # Wait 0 == forever, use a minimum of 1s wait = max(1, time_left) jid_tag = 'salt/job/{0}'.format(jid) raw = self.event.get_event(wait, jid_tag, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue found.add(raw['id']) ret[raw['id']] = {'ret': raw['return']} ret[raw['id']]['success'] = raw.get('success', False) if 'out' in raw: ret[raw['id']]['out'] = raw['out'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break if int(time.time()) > timeout_at: if verbose or show_timeout: if self.opts.get('minion_data_cache', False) \ or tgt_type in ('glob', 'pcre', 'list'): if len(found) < len(minions): fail = sorted(list(minions.difference(found))) for minion in fail: ret[minion] = { 'out': 'no_return', 'ret': 'Minion did not return' } break time.sleep(0.01) self._clean_up_subscriptions(jid) return ret def get_cli_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, progress=False, show_timeout=False, show_jid=False, **kwargs): ''' Get the returns for the command line interface via the event system ''' log.trace('func get_cli_event_returns()') if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) # lazy load the connected minions connected_minions = None return_count = 0 for ret in self.get_iter_returns(jid, minions, timeout=timeout, tgt=tgt, tgt_type=tgt_type, # (gtmanfred) 
expect_minions is popped here incase it is passed from a client # call. If this is not popped, then it would be passed twice to # get_iter_returns. expect_minions=(kwargs.pop('expect_minions', False) or verbose or show_timeout), **kwargs ): log.debug('return event: %s', ret) return_count = return_count + 1 if progress: for id_, min_ret in six.iteritems(ret): if not min_ret.get('failed') is True: yield {'minion_count': len(minions), 'return_count': return_count} # replace the return structure for missing minions for id_, min_ret in six.iteritems(ret): if min_ret.get('failed') is True: if connected_minions is None: connected_minions = salt.utils.minions.CkMinions(self.opts).connected_ids() if self.opts['minion_data_cache'] \ and salt.cache.factory(self.opts).contains('minions/{0}'.format(id_), 'data') \ and connected_minions \ and id_ not in connected_minions: yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [Not connected]', 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: # don't report syndics as unresponsive minions if not os.path.exists(os.path.join(self.opts['syndic_dir'], id_)): yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [No response]' '\nThe minions may not have all finished running and any ' 'remaining minions will return upon completion. To look ' 'up the return data for this job later, run the following ' 'command:\n\n' 'salt-run jobs.lookup_jid {0}'.format(jid), 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: yield {id_: min_ret} self._clean_up_subscriptions(jid) def get_event_iter_returns(self, jid, minions, timeout=None): ''' Gather the return data from the event system, break hard when timeout is reached. 
''' log.trace('entered - function get_event_iter_returns()') if timeout is None: timeout = self.opts['timeout'] timeout_at = time.time() + timeout found = set() # Check to see if the jid is real, if not return the empty dict if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() # Wait for the hosts to check in while True: raw = self.event.get_event(timeout, auto_reconnect=self.auto_reconnect) if raw is None or time.time() > timeout_at: # Timeout reached break if 'minions' in raw.get('data', {}): continue try: found.add(raw['id']) ret = {raw['id']: {'ret': raw['return']}} except KeyError: # Ignore other erroneous messages continue if 'out' in raw: ret[raw['id']]['out'] = raw['out'] yield ret time.sleep(0.02) def _prep_pub(self, tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs): ''' Set up the payload_kwargs to be sent down to the master ''' if tgt_type == 'nodegroup': if tgt not in self.opts['nodegroups']: conf_file = self.opts.get( 'conf_file', 'the master config file' ) raise SaltInvocationError( 'Node group {0} unavailable in {1}'.format( tgt, conf_file ) ) tgt = salt.utils.minions.nodegroup_comp(tgt, self.opts['nodegroups']) tgt_type = 'compound' # Convert a range expression to a list of nodes and change expression # form to list if tgt_type == 'range' and HAS_RANGE: tgt = self._convert_range_to_list(tgt) tgt_type = 'list' # If an external job cache is specified add it to the ret list if self.opts.get('ext_job_cache'): if ret: ret += ',{0}'.format(self.opts['ext_job_cache']) else: ret = self.opts['ext_job_cache'] # format the payload - make a function that does this in the payload # module # Generate the standard keyword args to feed to format_payload payload_kwargs = {'cmd': 'publish', 'tgt': tgt, 'fun': fun, 'arg': arg, 'key': self.key, 'tgt_type': tgt_type, 'ret': ret, 'jid': jid} # if kwargs are passed, pack them. 
if kwargs: payload_kwargs['kwargs'] = kwargs # If we have a salt user, add it to the payload if self.opts['syndic_master'] and 'user' in kwargs: payload_kwargs['user'] = kwargs['user'] elif self.salt_user: payload_kwargs['user'] = self.salt_user # If we're a syndication master, pass the timeout if self.opts['order_masters']: payload_kwargs['to'] = timeout return payload_kwargs def pub(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, listen=False, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.ReqChannel.factory(self.opts, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. 
# If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError as err: log.error(err) raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: return payload self.key = key payload_kwargs['key'] = self.key payload = channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: return payload # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() return {'jid': payload['load']['jid'], 'minions': payload['load']['minions']} @tornado.gen.coroutine def pub_async(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, io_loop=None, listen=True, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. 
fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.AsyncReqChannel.factory(self.opts, io_loop=io_loop, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. # If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = yield channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError: raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! 
Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: raise tornado.gen.Return(payload) self.key = key payload_kwargs['key'] = self.key payload = yield channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: raise tornado.gen.Return(payload) # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() raise tornado.gen.Return({'jid': payload['load']['jid'], 'minions': payload['load']['minions']}) def __del__(self): # This IS really necessary! # When running tests, if self.events is not destroyed, we leak 2 # threads per test case which uses self.client if hasattr(self, 'event'): # The call below will take care of calling 'self.event.destroy()' del self.event def _clean_up_subscriptions(self, job_id): if self.opts.get('order_masters'): self.event.unsubscribe('syndic/.*/{0}'.format(job_id), 'regex') self.event.unsubscribe('salt/job/{0}'.format(job_id))
saltstack/salt
salt/client/__init__.py
LocalClient._convert_range_to_list
python
def _convert_range_to_list(self, tgt): ''' convert a seco.range range into a list target ''' range_ = seco.range.Range(self.opts['range_server']) try: return range_.expand(tgt) except seco.range.RangeException as err: print('Range server exception: {0}'.format(err)) return []
convert a seco.range range into a list target
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L202-L211
null
class LocalClient(object): ''' The interface used by the :command:`salt` CLI tool on the Salt Master ``LocalClient`` is used to send a command to Salt minions to execute :ref:`execution modules <all-salt.modules>` and return the results to the Salt Master. Importing and using ``LocalClient`` must be done on the same machine as the Salt Master and it must be done using the same user that the Salt Master is running as. (Unless :conf_master:`external_auth` is configured and authentication credentials are included in the execution). .. note:: The LocalClient uses a Tornado IOLoop, this can create issues when using the LocalClient inside an existing IOLoop. If creating the LocalClient in partnership with another IOLoop either create the IOLoop before creating the LocalClient, or when creating the IOLoop use ioloop.current() which will return the ioloop created by LocalClient. .. code-block:: python import salt.client local = salt.client.LocalClient() local.cmd('*', 'test.fib', [10]) ''' def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, 'master'), mopts=None, skip_perm_errors=False, io_loop=None, keep_loop=False, auto_reconnect=False): ''' :param IOLoop io_loop: io_loop used for events. Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. 
''' if mopts: self.opts = mopts else: if os.path.isdir(c_path): log.warning( '%s expects a file path not a directory path(%s) to ' 'its \'c_path\' keyword argument', self.__class__.__name__, c_path ) self.opts = salt.config.client_config(c_path) self.serial = salt.payload.Serial(self.opts) self.salt_user = salt.utils.user.get_specific_user() self.skip_perm_errors = skip_perm_errors self.key = self.__read_master_key() self.auto_reconnect = auto_reconnect self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False, io_loop=io_loop, keep_loop=keep_loop) self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils) self.returners = salt.loader.returners(self.opts, self.functions) def __read_master_key(self): ''' Read in the rotating master authentication key ''' key_user = self.salt_user if key_user == 'root': if self.opts.get('user', 'root') != 'root': key_user = self.opts.get('user', 'root') if key_user.startswith('sudo_'): key_user = self.opts.get('user', 'root') if salt.utils.platform.is_windows(): # The username may contain '\' if it is in Windows # 'DOMAIN\username' format. Fix this for the keyfile path. 
key_user = key_user.replace('\\', '_') keyfile = os.path.join(self.opts['cachedir'], '.{0}_key'.format(key_user)) try: # Make sure all key parent directories are accessible salt.utils.verify.check_path_traversal(self.opts['cachedir'], key_user, self.skip_perm_errors) with salt.utils.files.fopen(keyfile, 'r') as key: return salt.utils.stringutils.to_unicode(key.read()) except (OSError, IOError, SaltClientError): # Fall back to eauth return '' def _get_timeout(self, timeout): ''' Return the timeout to use ''' if timeout is None: return self.opts['timeout'] if isinstance(timeout, int): return timeout if isinstance(timeout, six.string_types): try: return int(timeout) except ValueError: return self.opts['timeout'] # Looks like the timeout is invalid, use config return self.opts['timeout'] def gather_job_info(self, jid, tgt, tgt_type, listen=True, **kwargs): ''' Return the information about a given job ''' log.debug('Checking whether jid %s is still running', jid) timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) pub_data = self.run_job(tgt, 'saltutil.find_job', arg=[jid], tgt_type=tgt_type, timeout=timeout, listen=listen, **kwargs ) if 'jid' in pub_data: self.event.subscribe(pub_data['jid']) return pub_data def _check_pub_data(self, pub_data, listen=True): ''' Common checks on the pub_data data structure returned from running pub ''' if pub_data == '': # Failed to authenticate, this could be a bunch of things raise EauthAuthenticationError( 'Failed to authenticate! This is most likely because this ' 'user is not permitted to execute commands, but there is a ' 'small possibility that a disk error occurred (check ' 'disk/inode usage).' 
) # Failed to connect to the master and send the pub if 'error' in pub_data: print(pub_data['error']) log.debug('_check_pub_data() error: %s', pub_data['error']) return {} elif 'jid' not in pub_data: return {} if pub_data['jid'] == '0': print('Failed to connect to the Master, ' 'is the Salt Master running?') return {} # If we order masters (via a syndic), don't short circuit if no minions # are found if not self.opts.get('order_masters'): # Check for no minions if not pub_data['minions']: print('No minions matched the target. ' 'No command was sent, no jid was assigned.') return {} # don't install event subscription listeners when the request is asynchronous # and doesn't care. this is important as it will create event leaks otherwise if not listen: return pub_data if self.opts.get('order_masters'): self.event.subscribe('syndic/.*/{0}'.format(pub_data['jid']), 'regex') self.event.subscribe('salt/job/{0}'.format(pub_data['jid'])) return pub_data def run_job( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=False, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = self.pub( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) return self._check_pub_data(pub_data, listen=listen) def gather_minions(self, tgt, expr_form): _res = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form) return _res['minions'] @tornado.gen.coroutine def run_job_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=True, io_loop=None, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job_async('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = yield self.pub_async( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), io_loop=io_loop, listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) raise tornado.gen.Return(self._check_pub_data(pub_data, listen=listen)) def cmd_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', kwarg=None, **kwargs): ''' Asynchronously send a command to connected minions The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A job ID or 0 on failure. .. code-block:: python >>> local.cmd_async('*', 'test.sleep', [300]) '20131219215921857715' ''' pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, jid=jid, kwarg=kwarg, listen=False, **kwargs) try: return pub_data['jid'] except KeyError: return 0 def cmd_subset( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, sub=3, cli=False, progress=False, full_return=False, **kwargs): ''' Execute a command on a random subset of the targeted systems The function signature is the same as :py:meth:`cmd` with the following exceptions. :param sub: The number of systems to execute on :param cli: When this is set to True, a generator is returned, otherwise a dictionary of the minion returns is returned .. 
code-block:: python >>> SLC.cmd_subset('*', 'test.ping', sub=1) {'jerry': True} ''' minion_ret = self.cmd(tgt, 'sys.list_functions', tgt_type=tgt_type, **kwargs) minions = list(minion_ret) random.shuffle(minions) f_tgt = [] for minion in minions: if fun in minion_ret[minion]: f_tgt.append(minion) if len(f_tgt) >= sub: break func = self.cmd if cli: func = self.cmd_cli return func( f_tgt, fun, arg, tgt_type='list', ret=ret, kwarg=kwarg, progress=progress, full_return=full_return, **kwargs) def cmd_batch( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, batch='10%', **kwargs): ''' Iteratively execute a command on subsets of minions at a time The function signature is the same as :py:meth:`cmd` with the following exceptions. :param batch: The batch identifier of systems to execute on :returns: A generator of minion returns .. code-block:: python >>> returns = local.cmd_batch('*', 'state.highstate', batch='10%') >>> for ret in returns: ... print(ret) {'jerry': {...}} {'dave': {...}} {'stewart': {...}} ''' # Late import - not used anywhere else in this file import salt.cli.batch opts = salt.cli.batch.batch_get_opts( tgt, fun, batch, self.opts, arg=arg, tgt_type=tgt_type, ret=ret, kwarg=kwarg, **kwargs) eauth = salt.cli.batch.batch_get_eauth(kwargs) arg = salt.utils.args.parse_input(arg, kwargs=kwarg) opts = {'tgt': tgt, 'fun': fun, 'arg': arg, 'tgt_type': tgt_type, 'ret': ret, 'batch': batch, 'failhard': kwargs.get('failhard', False), 'raw': kwargs.get('raw', False)} if 'timeout' in kwargs: opts['timeout'] = kwargs['timeout'] if 'gather_job_timeout' in kwargs: opts['gather_job_timeout'] = kwargs['gather_job_timeout'] if 'batch_wait' in kwargs: opts['batch_wait'] = int(kwargs['batch_wait']) eauth = {} if 'eauth' in kwargs: eauth['eauth'] = kwargs.pop('eauth') if 'username' in kwargs: eauth['username'] = kwargs.pop('username') if 'password' in kwargs: eauth['password'] = kwargs.pop('password') if 'token' in kwargs: eauth['token'] = kwargs.pop('token') for key, 
val in six.iteritems(self.opts): if key not in opts: opts[key] = val batch = salt.cli.batch.Batch(opts, eauth=eauth, quiet=True) for ret in batch.run(): yield ret def cmd(self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', jid='', full_return=False, kwarg=None, **kwargs): ''' Synchronously execute a command on targeted minions The cmd method will execute and wait for the timeout period for all minions to reply, then it will return all minion data at once. .. code-block:: python >>> import salt.client >>> local = salt.client.LocalClient() >>> local.cmd('*', 'cmd.run', ['whoami']) {'jerry': 'root'} With extra keyword arguments for the command function to be run: .. code-block:: python local.cmd('*', 'test.arg', ['arg1', 'arg2'], kwarg={'foo': 'bar'}) Compound commands can be used for multiple executions in a single publish. Function names and function arguments are provided in separate lists but the index values must correlate and an empty list must be used if no arguments are required. .. code-block:: python >>> local.cmd('*', [ 'grains.items', 'sys.doc', 'cmd.run', ], [ [], [], ['uptime'], ]) :param tgt: Which minions to target for the execution. Default is shell glob. Modified by the ``tgt_type`` option. :type tgt: string or list :param fun: The module and function to call on the specified minions of the form ``module.function``. For example ``test.ping`` or ``grains.items``. Compound commands Multiple functions may be called in a single publish by passing a list of commands. This can dramatically lower overhead and speed up the application communicating with Salt. This requires that the ``arg`` param is a list of lists. The ``fun`` list and the ``arg`` list must correlate by index meaning a function that does not take arguments must still have a corresponding empty list at the expected index. :type fun: string or list of strings :param arg: A list of arguments to pass to the remote function. 
If the function takes no arguments ``arg`` may be omitted except when executing a compound command. :type arg: list or list-of-lists :param timeout: Seconds to wait after the last minion returns but before all minions return. :param tgt_type: The type of ``tgt``. Allowed values: * ``glob`` - Bash glob completion - Default * ``pcre`` - Perl style regular expression * ``list`` - Python list of hosts * ``grain`` - Match based on a grain comparison * ``grain_pcre`` - Grain comparison with a regex * ``pillar`` - Pillar data comparison * ``pillar_pcre`` - Pillar data comparison with a regex * ``nodegroup`` - Match on nodegroup * ``range`` - Use a Range server for matching * ``compound`` - Pass a compound match string * ``ipcidr`` - Match based on Subnet (CIDR notation) or IPv4 address. .. versionchanged:: 2017.7.0 Renamed from ``expr_form`` to ``tgt_type`` :param ret: The returner to use. The value passed can be single returner, or a comma delimited list of returners to call in order on the minions :param kwarg: A dictionary with keyword arguments for the function. :param full_return: Output the job return only (default) or the full return including exit code and other job metadata. :param kwargs: Optional keyword arguments. Authentication credentials may be passed when using :conf_master:`external_auth`. For example: ``local.cmd('*', 'test.ping', username='saltdev', password='saltdev', eauth='pam')``. Or: ``local.cmd('*', 'test.ping', token='5871821ea51754fdcea8153c1c745433')`` :returns: A dictionary with the result of the execution, keyed by minion ID. A compound command will return a sub-dictionary keyed by function name. 
''' was_listening = self.event.cpub try: pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, timeout, jid, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data ret = {} for fn_ret in self.get_cli_event_returns( pub_data['jid'], pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, **kwargs): if fn_ret: for mid, data in six.iteritems(fn_ret): ret[mid] = (data if full_return else data.get('ret', {})) for failed in list(set(pub_data['minions']) - set(ret)): ret[failed] = False return ret finally: if not was_listening: self.event.close_pub() def cmd_cli( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, progress=False, **kwargs): ''' Used by the :command:`salt` CLI. This method returns minion returns as they come back and attempts to block until all minions return. The function signature is the same as :py:meth:`cmd` with the following exceptions. :param verbose: Print extra information about the running command :returns: A generator ''' was_listening = self.event.cpub if fun.startswith('state.'): ref = {'compound': '-C', 'glob': '', 'grain': '-G', 'grain_pcre': '-P', 'ipcidr': '-S', 'list': '-L', 'nodegroup': '-N', 'pcre': '-E', 'pillar': '-I', 'pillar_pcre': '-J'} if HAS_RANGE: ref['range'] = '-R' if ref[tgt_type].startswith('-'): self.target_data = "{0} '{1}'".format( ref[tgt_type], ','.join(tgt) if isinstance(tgt, list) else tgt) else: self.target_data = ','.join(tgt) if isinstance(tgt, list) else tgt else: self.target_data = '' try: self.pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not self.pub_data: yield self.pub_data else: try: for fn_ret in self.get_cli_event_returns( self.pub_data['jid'], self.pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, verbose, progress, **kwargs): if not fn_ret: continue yield fn_ret except KeyboardInterrupt: exit_msg = ( '\nExiting gracefully on Ctrl-c' '\n' 'This job\'s jid is: {0}\n' 'The 
minions may not have all finished running and any ' 'remaining minions will return upon completion.\n\n' 'To look up the return data for this job later, run the ' 'following command:\n' 'salt-run jobs.lookup_jid {0}'.format(self.pub_data['jid'])) if self.target_data: exit_msg += ( '\n\n' 'To set up the state run to safely exit, run the following command:\n' 'salt {0} state.soft_kill {1}'.format(self.target_data, self.pub_data['jid'])) raise SystemExit(exit_msg) finally: if not was_listening: self.event.close_pub() def cmd_iter( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, **kwargs): ''' Yields the individual minion returns as they come in The function signature is the same as :py:meth:`cmd` with the following exceptions. Normally :py:meth:`cmd_iter` does not yield results for minions that are not connected. If you want it to return results for disconnected minions set `expect_minions=True` in `kwargs`. :return: A generator yielding the individual minion returns .. code-block:: python >>> ret = local.cmd_iter('*', 'test.ping') >>> for i in ret: ... print(i) {'jerry': {'ret': True}} {'dave': {'ret': True}} {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: if kwargs.get('yield_pub_data'): yield pub_data for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=self._get_timeout(timeout), tgt=tgt, tgt_type=tgt_type, **kwargs): if not fn_ret: continue yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_iter_no_block( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, show_jid=False, verbose=False, **kwargs): ''' Yields the individual minion returns as they come in, or None when no returns are available. 
The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A generator yielding the individual minion returns, or None when no returns are available. This allows for actions to be injected in between minion returns. .. code-block:: python >>> ret = local.cmd_iter_no_block('*', 'test.ping') >>> for i in ret: ... print(i) None {'jerry': {'ret': True}} {'dave': {'ret': True}} None {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=timeout, tgt=tgt, tgt_type=tgt_type, block=False, **kwargs): if fn_ret and any([show_jid, verbose]): for minion in fn_ret: fn_ret[minion]['jid'] = pub_data['jid'] yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_full_return( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, **kwargs): ''' Execute a salt command and return ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data return (self.get_cli_static_event_returns(pub_data['jid'], pub_data['minions'], timeout, tgt, tgt_type, verbose)) finally: if not was_listening: self.event.close_pub() def get_cli_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_jid=False, **kwargs): ''' Starts a watcher looking at the return data for a specified JID :returns: all of the information for the JID ''' if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] fret = {} # make sure the minions is a set (since we do set operations on it) 
minions = set(minions) found = set() # start this before the cache lookup-- in case new stuff comes in event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) # get the info from the cache ret = self.get_cache_returns(jid) if ret != {}: found.update(set(ret)) yield ret # if you have all the returns, stop if len(found.intersection(minions)) >= len(minions): raise StopIteration() # otherwise, get them from the event system for event in event_iter: if event != {}: found.update(set(event)) yield event if len(found.intersection(minions)) >= len(minions): self._clean_up_subscriptions(jid) raise StopIteration() # TODO: tests!! def get_returns_no_block( self, tag, match_type=None): ''' Raw function to just return events of jid excluding timeout logic Yield either the raw event data or None Pass a list of additional regular expressions as `tags_regex` to search the event bus for non-return data, such as minion lists returned from syndics. ''' while True: raw = self.event.get_event(wait=0.01, tag=tag, match_type=match_type, full=True, no_block=True, auto_reconnect=self.auto_reconnect) yield raw def get_iter_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', expect_minions=False, block=True, **kwargs): ''' Watch the event system and return job data as it comes in :returns: all of the information for the JID ''' if not isinstance(minions, set): if isinstance(minions, six.string_types): minions = set([minions]) elif isinstance(minions, (list, tuple)): minions = set(list(minions)) if timeout is None: timeout = self.opts['timeout'] gather_job_timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) start = int(time.time()) # timeouts per minion, id_ -> timeout time minion_timeouts = {} found = set() missing = set() # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the 
iteration, since the jid is invalid raise StopIteration() except Exception as exc: log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG) # Wait for the hosts to check in last_time = False # iterator for this job's return if self.opts['order_masters']: # If we are a MoM, we need to gather expected minions from downstreams masters. ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex') else: ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid)) # iterator for the info of this job jinfo_iter = [] # open event jids that need to be un-subscribed from later open_jids = set() timeout_at = time.time() + timeout gather_syndic_wait = time.time() + self.opts['syndic_wait'] # are there still minions running the job out there # start as True so that we ping at least once minions_running = True log.debug( 'get_iter_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) while True: # Process events until timeout is reached or all minions have returned for raw in ret_iter: # if we got None, then there were no events if raw is None: break if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) if 'missing' in raw.get('data', {}): missing.update(raw['data']['missing']) continue if 'return' not in raw['data']: continue if kwargs.get('raw', False): found.add(raw['data']['id']) yield raw else: found.add(raw['data']['id']) ret = {raw['data']['id']: {'ret': raw['data']['return']}} if 'out' in raw['data']: ret[raw['data']['id']]['out'] = raw['data']['out'] if 'retcode' in raw['data']: ret[raw['data']['id']]['retcode'] = raw['data']['retcode'] if 'jid' in raw['data']: ret[raw['data']['id']]['jid'] = raw['data']['jid'] if kwargs.get('_cmd_meta', False): ret[raw['data']['id']].update(raw['data']) log.debug('jid %s return from %s', jid, raw['data']['id']) yield ret # if we have all of the returns (and we aren't a syndic), no need for anything fancy if 
len(found.intersection(minions)) >= len(minions) and not self.opts['order_masters']: # All minions have returned, break out of the loop log.debug('jid %s found all minions %s', jid, found) break elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']: if len(found) >= len(minions) and len(minions) > 0 and time.time() > gather_syndic_wait: # There were some minions to find and we found them # However, this does not imply that *all* masters have yet responded with expected minion lists. # Therefore, continue to wait up to the syndic_wait period (calculated in gather_syndic_wait) to see # if additional lower-level masters deliver their lists of expected # minions. break # If we get here we may not have gathered the minion list yet. Keep waiting # for all lower-level masters to respond with their minion lists # let start the timeouts for all remaining minions for id_ in minions - found: # if we have a new minion in the list, make sure it has a timeout if id_ not in minion_timeouts: minion_timeouts[id_] = time.time() + timeout # if the jinfo has timed out and some minions are still running the job # re-do the ping if time.time() > timeout_at and minions_running: # since this is a new ping, no one has responded yet jinfo = self.gather_job_info(jid, list(minions - found), 'list', **kwargs) minions_running = False # if we weren't assigned any jid that means the master thinks # we have nothing to send if 'jid' not in jinfo: jinfo_iter = [] else: jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid'])) timeout_at = time.time() + gather_job_timeout # if you are a syndic, wait a little longer if self.opts['order_masters']: timeout_at += self.opts.get('syndic_wait', 1) # check for minions that are running the job still for raw in jinfo_iter: # if there are no more events, lets stop waiting for the jinfo if raw is None: break try: if raw['data']['retcode'] > 0: log.error('saltutil returning errors on minion %s', raw['data']['id']) 
minions.remove(raw['data']['id']) break except KeyError as exc: # This is a safe pass. We're just using the try/except to # avoid having to deep-check for keys. missing_key = exc.__str__().strip('\'"') if missing_key == 'retcode': log.debug('retcode missing from client return') else: log.debug( 'Passing on saltutil error. Key \'%s\' missing ' 'from client return. This may be an error in ' 'the client.', missing_key ) # Keep track of the jid events to unsubscribe from later open_jids.add(jinfo['jid']) # TODO: move to a library?? if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue if 'syndic' in raw.get('data', {}): minions.update(raw['syndic']) continue if 'return' not in raw.get('data', {}): continue # if the job isn't running there anymore... don't count if raw['data']['return'] == {}: continue # if the minion throws an exception containing the word "return" # the master will try to handle the string as a dict in the next # step. Check if we have a string, log the issue and continue. 
if isinstance(raw['data']['return'], six.string_types): log.error("unexpected return from minion: %s", raw) continue if 'return' in raw['data']['return'] and \ raw['data']['return']['return'] == {}: continue # if we didn't originally target the minion, lets add it to the list if raw['data']['id'] not in minions: minions.add(raw['data']['id']) # update this minion's timeout, as long as the job is still running minion_timeouts[raw['data']['id']] = time.time() + timeout # a minion returned, so we know its running somewhere minions_running = True # if we have hit gather_job_timeout (after firing the job) AND # if we have hit all minion timeouts, lets call it now = time.time() # if we have finished waiting, and no minions are running the job # then we need to see if each minion has timedout done = (now > timeout_at) and not minions_running if done: # if all minions have timeod out for id_ in minions - found: if now < minion_timeouts[id_]: done = False break if done: break # don't spin if block: time.sleep(0.01) else: yield # If there are any remaining open events, clean them up. if open_jids: for jid in open_jids: self.event.unsubscribe(jid) if expect_minions: for minion in list((minions - found)): yield {minion: {'failed': True}} # Filter out any minions marked as missing for which we received # returns (prevents false events sent due to higher-level masters not # knowing about lower-level minions). 
missing -= found # Report on missing minions if missing: for minion in missing: yield {minion: {'failed': True}} def get_returns( self, jid, minions, timeout=None): ''' Get the returns for the command line interface via the event system ''' minions = set(minions) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout log.debug( 'get_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Master job cache returner [{0}] failed to verify jid. ' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: time_left = timeout_at - int(time.time()) wait = max(1, time_left) raw = self.event.get_event(wait, jid, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: found.add(raw['id']) ret[raw['id']] = raw['return'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break if int(time.time()) > timeout_at: log.info( 'jid %s minions %s did not return in time', jid, (minions - found) ) break time.sleep(0.01) return ret def get_full_returns(self, jid, minions, timeout=None): ''' This method starts off a watcher looking at the return data for a specified jid, it returns all of the information for the jid ''' # TODO: change this from ret to return... or the other way. 
# Its inconsistent, we should pick one ret = {} # create the iterator-- since we want to get anyone in the middle event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Returner {0} could not fetch jid data. ' 'Exception details: {1}'.format( self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # if we have all the minion returns, lets just return if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise lets use the listener we created above to get the rest for event_ret in event_iter: # if nothing in the event_ret, skip if event_ret == {}: time.sleep(0.02) continue for minion, m_data in six.iteritems(event_ret): if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # are we done yet? if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise we hit the timeout, return what we have return ret def get_cache_returns(self, jid): ''' Execute a single pass to gather the contents of the job cache ''' ret = {} try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Could not examine master job cache. ' 'Error occurred in {0} returner. 
' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data return ret def get_cli_static_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_timeout=False, show_jid=False): ''' Get the returns for the command line interface via the event system ''' log.trace('entered - function get_cli_static_event_returns()') minions = set(minions) if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Load could not be retrieved from ' 'returner {0}. 
Exception details: {1}'.format( self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: # Process events until timeout is reached or all minions have returned time_left = timeout_at - int(time.time()) # Wait 0 == forever, use a minimum of 1s wait = max(1, time_left) jid_tag = 'salt/job/{0}'.format(jid) raw = self.event.get_event(wait, jid_tag, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue found.add(raw['id']) ret[raw['id']] = {'ret': raw['return']} ret[raw['id']]['success'] = raw.get('success', False) if 'out' in raw: ret[raw['id']]['out'] = raw['out'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break if int(time.time()) > timeout_at: if verbose or show_timeout: if self.opts.get('minion_data_cache', False) \ or tgt_type in ('glob', 'pcre', 'list'): if len(found) < len(minions): fail = sorted(list(minions.difference(found))) for minion in fail: ret[minion] = { 'out': 'no_return', 'ret': 'Minion did not return' } break time.sleep(0.01) self._clean_up_subscriptions(jid) return ret def get_cli_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, progress=False, show_timeout=False, show_jid=False, **kwargs): ''' Get the returns for the command line interface via the event system ''' log.trace('func get_cli_event_returns()') if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) # lazy load the connected minions connected_minions = None return_count = 0 for ret in self.get_iter_returns(jid, minions, timeout=timeout, tgt=tgt, tgt_type=tgt_type, # (gtmanfred) 
expect_minions is popped here incase it is passed from a client # call. If this is not popped, then it would be passed twice to # get_iter_returns. expect_minions=(kwargs.pop('expect_minions', False) or verbose or show_timeout), **kwargs ): log.debug('return event: %s', ret) return_count = return_count + 1 if progress: for id_, min_ret in six.iteritems(ret): if not min_ret.get('failed') is True: yield {'minion_count': len(minions), 'return_count': return_count} # replace the return structure for missing minions for id_, min_ret in six.iteritems(ret): if min_ret.get('failed') is True: if connected_minions is None: connected_minions = salt.utils.minions.CkMinions(self.opts).connected_ids() if self.opts['minion_data_cache'] \ and salt.cache.factory(self.opts).contains('minions/{0}'.format(id_), 'data') \ and connected_minions \ and id_ not in connected_minions: yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [Not connected]', 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: # don't report syndics as unresponsive minions if not os.path.exists(os.path.join(self.opts['syndic_dir'], id_)): yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [No response]' '\nThe minions may not have all finished running and any ' 'remaining minions will return upon completion. To look ' 'up the return data for this job later, run the following ' 'command:\n\n' 'salt-run jobs.lookup_jid {0}'.format(jid), 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: yield {id_: min_ret} self._clean_up_subscriptions(jid) def get_event_iter_returns(self, jid, minions, timeout=None): ''' Gather the return data from the event system, break hard when timeout is reached. 
''' log.trace('entered - function get_event_iter_returns()') if timeout is None: timeout = self.opts['timeout'] timeout_at = time.time() + timeout found = set() # Check to see if the jid is real, if not return the empty dict if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() # Wait for the hosts to check in while True: raw = self.event.get_event(timeout, auto_reconnect=self.auto_reconnect) if raw is None or time.time() > timeout_at: # Timeout reached break if 'minions' in raw.get('data', {}): continue try: found.add(raw['id']) ret = {raw['id']: {'ret': raw['return']}} except KeyError: # Ignore other erroneous messages continue if 'out' in raw: ret[raw['id']]['out'] = raw['out'] yield ret time.sleep(0.02) def _prep_pub(self, tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs): ''' Set up the payload_kwargs to be sent down to the master ''' if tgt_type == 'nodegroup': if tgt not in self.opts['nodegroups']: conf_file = self.opts.get( 'conf_file', 'the master config file' ) raise SaltInvocationError( 'Node group {0} unavailable in {1}'.format( tgt, conf_file ) ) tgt = salt.utils.minions.nodegroup_comp(tgt, self.opts['nodegroups']) tgt_type = 'compound' # Convert a range expression to a list of nodes and change expression # form to list if tgt_type == 'range' and HAS_RANGE: tgt = self._convert_range_to_list(tgt) tgt_type = 'list' # If an external job cache is specified add it to the ret list if self.opts.get('ext_job_cache'): if ret: ret += ',{0}'.format(self.opts['ext_job_cache']) else: ret = self.opts['ext_job_cache'] # format the payload - make a function that does this in the payload # module # Generate the standard keyword args to feed to format_payload payload_kwargs = {'cmd': 'publish', 'tgt': tgt, 'fun': fun, 'arg': arg, 'key': self.key, 'tgt_type': tgt_type, 'ret': ret, 'jid': jid} # if kwargs are passed, pack them. 
if kwargs: payload_kwargs['kwargs'] = kwargs # If we have a salt user, add it to the payload if self.opts['syndic_master'] and 'user' in kwargs: payload_kwargs['user'] = kwargs['user'] elif self.salt_user: payload_kwargs['user'] = self.salt_user # If we're a syndication master, pass the timeout if self.opts['order_masters']: payload_kwargs['to'] = timeout return payload_kwargs def pub(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, listen=False, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.ReqChannel.factory(self.opts, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. 
# If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError as err: log.error(err) raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: return payload self.key = key payload_kwargs['key'] = self.key payload = channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: return payload # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() return {'jid': payload['load']['jid'], 'minions': payload['load']['minions']} @tornado.gen.coroutine def pub_async(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, io_loop=None, listen=True, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. 
fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.AsyncReqChannel.factory(self.opts, io_loop=io_loop, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. # If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = yield channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError: raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! 
Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: raise tornado.gen.Return(payload) self.key = key payload_kwargs['key'] = self.key payload = yield channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: raise tornado.gen.Return(payload) # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() raise tornado.gen.Return({'jid': payload['load']['jid'], 'minions': payload['load']['minions']}) def __del__(self): # This IS really necessary! # When running tests, if self.events is not destroyed, we leak 2 # threads per test case which uses self.client if hasattr(self, 'event'): # The call below will take care of calling 'self.event.destroy()' del self.event def _clean_up_subscriptions(self, job_id): if self.opts.get('order_masters'): self.event.unsubscribe('syndic/.*/{0}'.format(job_id), 'regex') self.event.unsubscribe('salt/job/{0}'.format(job_id))
saltstack/salt
salt/client/__init__.py
LocalClient._get_timeout
python
def _get_timeout(self, timeout):
    '''
    Resolve the effective timeout for a job.

    An integer is used as-is, and a numeric string is converted to an
    integer.  ``None``, a non-numeric string, or any other type all
    fall back to the configured ``timeout`` option.
    '''
    if isinstance(timeout, int):
        return timeout
    if isinstance(timeout, six.string_types):
        try:
            return int(timeout)
        except ValueError:
            # Non-numeric string; fall through to the configured value.
            pass
    # None, an unparseable string, or an unsupported type: use the
    # timeout from the master configuration.
    return self.opts['timeout']
Return the timeout to use
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L213-L227
null
class LocalClient(object): ''' The interface used by the :command:`salt` CLI tool on the Salt Master ``LocalClient`` is used to send a command to Salt minions to execute :ref:`execution modules <all-salt.modules>` and return the results to the Salt Master. Importing and using ``LocalClient`` must be done on the same machine as the Salt Master and it must be done using the same user that the Salt Master is running as. (Unless :conf_master:`external_auth` is configured and authentication credentials are included in the execution). .. note:: The LocalClient uses a Tornado IOLoop, this can create issues when using the LocalClient inside an existing IOLoop. If creating the LocalClient in partnership with another IOLoop either create the IOLoop before creating the LocalClient, or when creating the IOLoop use ioloop.current() which will return the ioloop created by LocalClient. .. code-block:: python import salt.client local = salt.client.LocalClient() local.cmd('*', 'test.fib', [10]) ''' def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, 'master'), mopts=None, skip_perm_errors=False, io_loop=None, keep_loop=False, auto_reconnect=False): ''' :param IOLoop io_loop: io_loop used for events. Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. 
''' if mopts: self.opts = mopts else: if os.path.isdir(c_path): log.warning( '%s expects a file path not a directory path(%s) to ' 'its \'c_path\' keyword argument', self.__class__.__name__, c_path ) self.opts = salt.config.client_config(c_path) self.serial = salt.payload.Serial(self.opts) self.salt_user = salt.utils.user.get_specific_user() self.skip_perm_errors = skip_perm_errors self.key = self.__read_master_key() self.auto_reconnect = auto_reconnect self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False, io_loop=io_loop, keep_loop=keep_loop) self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils) self.returners = salt.loader.returners(self.opts, self.functions) def __read_master_key(self): ''' Read in the rotating master authentication key ''' key_user = self.salt_user if key_user == 'root': if self.opts.get('user', 'root') != 'root': key_user = self.opts.get('user', 'root') if key_user.startswith('sudo_'): key_user = self.opts.get('user', 'root') if salt.utils.platform.is_windows(): # The username may contain '\' if it is in Windows # 'DOMAIN\username' format. Fix this for the keyfile path. 
key_user = key_user.replace('\\', '_') keyfile = os.path.join(self.opts['cachedir'], '.{0}_key'.format(key_user)) try: # Make sure all key parent directories are accessible salt.utils.verify.check_path_traversal(self.opts['cachedir'], key_user, self.skip_perm_errors) with salt.utils.files.fopen(keyfile, 'r') as key: return salt.utils.stringutils.to_unicode(key.read()) except (OSError, IOError, SaltClientError): # Fall back to eauth return '' def _convert_range_to_list(self, tgt): ''' convert a seco.range range into a list target ''' range_ = seco.range.Range(self.opts['range_server']) try: return range_.expand(tgt) except seco.range.RangeException as err: print('Range server exception: {0}'.format(err)) return [] def gather_job_info(self, jid, tgt, tgt_type, listen=True, **kwargs): ''' Return the information about a given job ''' log.debug('Checking whether jid %s is still running', jid) timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) pub_data = self.run_job(tgt, 'saltutil.find_job', arg=[jid], tgt_type=tgt_type, timeout=timeout, listen=listen, **kwargs ) if 'jid' in pub_data: self.event.subscribe(pub_data['jid']) return pub_data def _check_pub_data(self, pub_data, listen=True): ''' Common checks on the pub_data data structure returned from running pub ''' if pub_data == '': # Failed to authenticate, this could be a bunch of things raise EauthAuthenticationError( 'Failed to authenticate! This is most likely because this ' 'user is not permitted to execute commands, but there is a ' 'small possibility that a disk error occurred (check ' 'disk/inode usage).' 
) # Failed to connect to the master and send the pub if 'error' in pub_data: print(pub_data['error']) log.debug('_check_pub_data() error: %s', pub_data['error']) return {} elif 'jid' not in pub_data: return {} if pub_data['jid'] == '0': print('Failed to connect to the Master, ' 'is the Salt Master running?') return {} # If we order masters (via a syndic), don't short circuit if no minions # are found if not self.opts.get('order_masters'): # Check for no minions if not pub_data['minions']: print('No minions matched the target. ' 'No command was sent, no jid was assigned.') return {} # don't install event subscription listeners when the request is asynchronous # and doesn't care. this is important as it will create event leaks otherwise if not listen: return pub_data if self.opts.get('order_masters'): self.event.subscribe('syndic/.*/{0}'.format(pub_data['jid']), 'regex') self.event.subscribe('salt/job/{0}'.format(pub_data['jid'])) return pub_data def run_job( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=False, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = self.pub( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) return self._check_pub_data(pub_data, listen=listen) def gather_minions(self, tgt, expr_form): _res = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form) return _res['minions'] @tornado.gen.coroutine def run_job_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=True, io_loop=None, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job_async('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = yield self.pub_async( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), io_loop=io_loop, listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) raise tornado.gen.Return(self._check_pub_data(pub_data, listen=listen)) def cmd_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', kwarg=None, **kwargs): ''' Asynchronously send a command to connected minions The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A job ID or 0 on failure. .. code-block:: python >>> local.cmd_async('*', 'test.sleep', [300]) '20131219215921857715' ''' pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, jid=jid, kwarg=kwarg, listen=False, **kwargs) try: return pub_data['jid'] except KeyError: return 0 def cmd_subset( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, sub=3, cli=False, progress=False, full_return=False, **kwargs): ''' Execute a command on a random subset of the targeted systems The function signature is the same as :py:meth:`cmd` with the following exceptions. :param sub: The number of systems to execute on :param cli: When this is set to True, a generator is returned, otherwise a dictionary of the minion returns is returned .. 
code-block:: python >>> SLC.cmd_subset('*', 'test.ping', sub=1) {'jerry': True} ''' minion_ret = self.cmd(tgt, 'sys.list_functions', tgt_type=tgt_type, **kwargs) minions = list(minion_ret) random.shuffle(minions) f_tgt = [] for minion in minions: if fun in minion_ret[minion]: f_tgt.append(minion) if len(f_tgt) >= sub: break func = self.cmd if cli: func = self.cmd_cli return func( f_tgt, fun, arg, tgt_type='list', ret=ret, kwarg=kwarg, progress=progress, full_return=full_return, **kwargs) def cmd_batch( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, batch='10%', **kwargs): ''' Iteratively execute a command on subsets of minions at a time The function signature is the same as :py:meth:`cmd` with the following exceptions. :param batch: The batch identifier of systems to execute on :returns: A generator of minion returns .. code-block:: python >>> returns = local.cmd_batch('*', 'state.highstate', batch='10%') >>> for ret in returns: ... print(ret) {'jerry': {...}} {'dave': {...}} {'stewart': {...}} ''' # Late import - not used anywhere else in this file import salt.cli.batch opts = salt.cli.batch.batch_get_opts( tgt, fun, batch, self.opts, arg=arg, tgt_type=tgt_type, ret=ret, kwarg=kwarg, **kwargs) eauth = salt.cli.batch.batch_get_eauth(kwargs) arg = salt.utils.args.parse_input(arg, kwargs=kwarg) opts = {'tgt': tgt, 'fun': fun, 'arg': arg, 'tgt_type': tgt_type, 'ret': ret, 'batch': batch, 'failhard': kwargs.get('failhard', False), 'raw': kwargs.get('raw', False)} if 'timeout' in kwargs: opts['timeout'] = kwargs['timeout'] if 'gather_job_timeout' in kwargs: opts['gather_job_timeout'] = kwargs['gather_job_timeout'] if 'batch_wait' in kwargs: opts['batch_wait'] = int(kwargs['batch_wait']) eauth = {} if 'eauth' in kwargs: eauth['eauth'] = kwargs.pop('eauth') if 'username' in kwargs: eauth['username'] = kwargs.pop('username') if 'password' in kwargs: eauth['password'] = kwargs.pop('password') if 'token' in kwargs: eauth['token'] = kwargs.pop('token') for key, 
val in six.iteritems(self.opts): if key not in opts: opts[key] = val batch = salt.cli.batch.Batch(opts, eauth=eauth, quiet=True) for ret in batch.run(): yield ret def cmd(self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', jid='', full_return=False, kwarg=None, **kwargs): ''' Synchronously execute a command on targeted minions The cmd method will execute and wait for the timeout period for all minions to reply, then it will return all minion data at once. .. code-block:: python >>> import salt.client >>> local = salt.client.LocalClient() >>> local.cmd('*', 'cmd.run', ['whoami']) {'jerry': 'root'} With extra keyword arguments for the command function to be run: .. code-block:: python local.cmd('*', 'test.arg', ['arg1', 'arg2'], kwarg={'foo': 'bar'}) Compound commands can be used for multiple executions in a single publish. Function names and function arguments are provided in separate lists but the index values must correlate and an empty list must be used if no arguments are required. .. code-block:: python >>> local.cmd('*', [ 'grains.items', 'sys.doc', 'cmd.run', ], [ [], [], ['uptime'], ]) :param tgt: Which minions to target for the execution. Default is shell glob. Modified by the ``tgt_type`` option. :type tgt: string or list :param fun: The module and function to call on the specified minions of the form ``module.function``. For example ``test.ping`` or ``grains.items``. Compound commands Multiple functions may be called in a single publish by passing a list of commands. This can dramatically lower overhead and speed up the application communicating with Salt. This requires that the ``arg`` param is a list of lists. The ``fun`` list and the ``arg`` list must correlate by index meaning a function that does not take arguments must still have a corresponding empty list at the expected index. :type fun: string or list of strings :param arg: A list of arguments to pass to the remote function. 
If the function takes no arguments ``arg`` may be omitted except when executing a compound command. :type arg: list or list-of-lists :param timeout: Seconds to wait after the last minion returns but before all minions return. :param tgt_type: The type of ``tgt``. Allowed values: * ``glob`` - Bash glob completion - Default * ``pcre`` - Perl style regular expression * ``list`` - Python list of hosts * ``grain`` - Match based on a grain comparison * ``grain_pcre`` - Grain comparison with a regex * ``pillar`` - Pillar data comparison * ``pillar_pcre`` - Pillar data comparison with a regex * ``nodegroup`` - Match on nodegroup * ``range`` - Use a Range server for matching * ``compound`` - Pass a compound match string * ``ipcidr`` - Match based on Subnet (CIDR notation) or IPv4 address. .. versionchanged:: 2017.7.0 Renamed from ``expr_form`` to ``tgt_type`` :param ret: The returner to use. The value passed can be single returner, or a comma delimited list of returners to call in order on the minions :param kwarg: A dictionary with keyword arguments for the function. :param full_return: Output the job return only (default) or the full return including exit code and other job metadata. :param kwargs: Optional keyword arguments. Authentication credentials may be passed when using :conf_master:`external_auth`. For example: ``local.cmd('*', 'test.ping', username='saltdev', password='saltdev', eauth='pam')``. Or: ``local.cmd('*', 'test.ping', token='5871821ea51754fdcea8153c1c745433')`` :returns: A dictionary with the result of the execution, keyed by minion ID. A compound command will return a sub-dictionary keyed by function name. 
''' was_listening = self.event.cpub try: pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, timeout, jid, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data ret = {} for fn_ret in self.get_cli_event_returns( pub_data['jid'], pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, **kwargs): if fn_ret: for mid, data in six.iteritems(fn_ret): ret[mid] = (data if full_return else data.get('ret', {})) for failed in list(set(pub_data['minions']) - set(ret)): ret[failed] = False return ret finally: if not was_listening: self.event.close_pub() def cmd_cli( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, progress=False, **kwargs): ''' Used by the :command:`salt` CLI. This method returns minion returns as they come back and attempts to block until all minions return. The function signature is the same as :py:meth:`cmd` with the following exceptions. :param verbose: Print extra information about the running command :returns: A generator ''' was_listening = self.event.cpub if fun.startswith('state.'): ref = {'compound': '-C', 'glob': '', 'grain': '-G', 'grain_pcre': '-P', 'ipcidr': '-S', 'list': '-L', 'nodegroup': '-N', 'pcre': '-E', 'pillar': '-I', 'pillar_pcre': '-J'} if HAS_RANGE: ref['range'] = '-R' if ref[tgt_type].startswith('-'): self.target_data = "{0} '{1}'".format( ref[tgt_type], ','.join(tgt) if isinstance(tgt, list) else tgt) else: self.target_data = ','.join(tgt) if isinstance(tgt, list) else tgt else: self.target_data = '' try: self.pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not self.pub_data: yield self.pub_data else: try: for fn_ret in self.get_cli_event_returns( self.pub_data['jid'], self.pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, verbose, progress, **kwargs): if not fn_ret: continue yield fn_ret except KeyboardInterrupt: exit_msg = ( '\nExiting gracefully on Ctrl-c' '\n' 'This job\'s jid is: {0}\n' 'The 
minions may not have all finished running and any ' 'remaining minions will return upon completion.\n\n' 'To look up the return data for this job later, run the ' 'following command:\n' 'salt-run jobs.lookup_jid {0}'.format(self.pub_data['jid'])) if self.target_data: exit_msg += ( '\n\n' 'To set up the state run to safely exit, run the following command:\n' 'salt {0} state.soft_kill {1}'.format(self.target_data, self.pub_data['jid'])) raise SystemExit(exit_msg) finally: if not was_listening: self.event.close_pub() def cmd_iter( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, **kwargs): ''' Yields the individual minion returns as they come in The function signature is the same as :py:meth:`cmd` with the following exceptions. Normally :py:meth:`cmd_iter` does not yield results for minions that are not connected. If you want it to return results for disconnected minions set `expect_minions=True` in `kwargs`. :return: A generator yielding the individual minion returns .. code-block:: python >>> ret = local.cmd_iter('*', 'test.ping') >>> for i in ret: ... print(i) {'jerry': {'ret': True}} {'dave': {'ret': True}} {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: if kwargs.get('yield_pub_data'): yield pub_data for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=self._get_timeout(timeout), tgt=tgt, tgt_type=tgt_type, **kwargs): if not fn_ret: continue yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_iter_no_block( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, show_jid=False, verbose=False, **kwargs): ''' Yields the individual minion returns as they come in, or None when no returns are available. 
The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A generator yielding the individual minion returns, or None when no returns are available. This allows for actions to be injected in between minion returns. .. code-block:: python >>> ret = local.cmd_iter_no_block('*', 'test.ping') >>> for i in ret: ... print(i) None {'jerry': {'ret': True}} {'dave': {'ret': True}} None {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=timeout, tgt=tgt, tgt_type=tgt_type, block=False, **kwargs): if fn_ret and any([show_jid, verbose]): for minion in fn_ret: fn_ret[minion]['jid'] = pub_data['jid'] yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_full_return( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, **kwargs): ''' Execute a salt command and return ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data return (self.get_cli_static_event_returns(pub_data['jid'], pub_data['minions'], timeout, tgt, tgt_type, verbose)) finally: if not was_listening: self.event.close_pub() def get_cli_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_jid=False, **kwargs): ''' Starts a watcher looking at the return data for a specified JID :returns: all of the information for the JID ''' if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] fret = {} # make sure the minions is a set (since we do set operations on it) 
minions = set(minions) found = set() # start this before the cache lookup-- in case new stuff comes in event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) # get the info from the cache ret = self.get_cache_returns(jid) if ret != {}: found.update(set(ret)) yield ret # if you have all the returns, stop if len(found.intersection(minions)) >= len(minions): raise StopIteration() # otherwise, get them from the event system for event in event_iter: if event != {}: found.update(set(event)) yield event if len(found.intersection(minions)) >= len(minions): self._clean_up_subscriptions(jid) raise StopIteration() # TODO: tests!! def get_returns_no_block( self, tag, match_type=None): ''' Raw function to just return events of jid excluding timeout logic Yield either the raw event data or None Pass a list of additional regular expressions as `tags_regex` to search the event bus for non-return data, such as minion lists returned from syndics. ''' while True: raw = self.event.get_event(wait=0.01, tag=tag, match_type=match_type, full=True, no_block=True, auto_reconnect=self.auto_reconnect) yield raw def get_iter_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', expect_minions=False, block=True, **kwargs): ''' Watch the event system and return job data as it comes in :returns: all of the information for the JID ''' if not isinstance(minions, set): if isinstance(minions, six.string_types): minions = set([minions]) elif isinstance(minions, (list, tuple)): minions = set(list(minions)) if timeout is None: timeout = self.opts['timeout'] gather_job_timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) start = int(time.time()) # timeouts per minion, id_ -> timeout time minion_timeouts = {} found = set() missing = set() # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the 
iteration, since the jid is invalid raise StopIteration() except Exception as exc: log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG) # Wait for the hosts to check in last_time = False # iterator for this job's return if self.opts['order_masters']: # If we are a MoM, we need to gather expected minions from downstreams masters. ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex') else: ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid)) # iterator for the info of this job jinfo_iter = [] # open event jids that need to be un-subscribed from later open_jids = set() timeout_at = time.time() + timeout gather_syndic_wait = time.time() + self.opts['syndic_wait'] # are there still minions running the job out there # start as True so that we ping at least once minions_running = True log.debug( 'get_iter_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) while True: # Process events until timeout is reached or all minions have returned for raw in ret_iter: # if we got None, then there were no events if raw is None: break if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) if 'missing' in raw.get('data', {}): missing.update(raw['data']['missing']) continue if 'return' not in raw['data']: continue if kwargs.get('raw', False): found.add(raw['data']['id']) yield raw else: found.add(raw['data']['id']) ret = {raw['data']['id']: {'ret': raw['data']['return']}} if 'out' in raw['data']: ret[raw['data']['id']]['out'] = raw['data']['out'] if 'retcode' in raw['data']: ret[raw['data']['id']]['retcode'] = raw['data']['retcode'] if 'jid' in raw['data']: ret[raw['data']['id']]['jid'] = raw['data']['jid'] if kwargs.get('_cmd_meta', False): ret[raw['data']['id']].update(raw['data']) log.debug('jid %s return from %s', jid, raw['data']['id']) yield ret # if we have all of the returns (and we aren't a syndic), no need for anything fancy if 
len(found.intersection(minions)) >= len(minions) and not self.opts['order_masters']: # All minions have returned, break out of the loop log.debug('jid %s found all minions %s', jid, found) break elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']: if len(found) >= len(minions) and len(minions) > 0 and time.time() > gather_syndic_wait: # There were some minions to find and we found them # However, this does not imply that *all* masters have yet responded with expected minion lists. # Therefore, continue to wait up to the syndic_wait period (calculated in gather_syndic_wait) to see # if additional lower-level masters deliver their lists of expected # minions. break # If we get here we may not have gathered the minion list yet. Keep waiting # for all lower-level masters to respond with their minion lists # let start the timeouts for all remaining minions for id_ in minions - found: # if we have a new minion in the list, make sure it has a timeout if id_ not in minion_timeouts: minion_timeouts[id_] = time.time() + timeout # if the jinfo has timed out and some minions are still running the job # re-do the ping if time.time() > timeout_at and minions_running: # since this is a new ping, no one has responded yet jinfo = self.gather_job_info(jid, list(minions - found), 'list', **kwargs) minions_running = False # if we weren't assigned any jid that means the master thinks # we have nothing to send if 'jid' not in jinfo: jinfo_iter = [] else: jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid'])) timeout_at = time.time() + gather_job_timeout # if you are a syndic, wait a little longer if self.opts['order_masters']: timeout_at += self.opts.get('syndic_wait', 1) # check for minions that are running the job still for raw in jinfo_iter: # if there are no more events, lets stop waiting for the jinfo if raw is None: break try: if raw['data']['retcode'] > 0: log.error('saltutil returning errors on minion %s', raw['data']['id']) 
minions.remove(raw['data']['id']) break except KeyError as exc: # This is a safe pass. We're just using the try/except to # avoid having to deep-check for keys. missing_key = exc.__str__().strip('\'"') if missing_key == 'retcode': log.debug('retcode missing from client return') else: log.debug( 'Passing on saltutil error. Key \'%s\' missing ' 'from client return. This may be an error in ' 'the client.', missing_key ) # Keep track of the jid events to unsubscribe from later open_jids.add(jinfo['jid']) # TODO: move to a library?? if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue if 'syndic' in raw.get('data', {}): minions.update(raw['syndic']) continue if 'return' not in raw.get('data', {}): continue # if the job isn't running there anymore... don't count if raw['data']['return'] == {}: continue # if the minion throws an exception containing the word "return" # the master will try to handle the string as a dict in the next # step. Check if we have a string, log the issue and continue. 
if isinstance(raw['data']['return'], six.string_types): log.error("unexpected return from minion: %s", raw) continue if 'return' in raw['data']['return'] and \ raw['data']['return']['return'] == {}: continue # if we didn't originally target the minion, lets add it to the list if raw['data']['id'] not in minions: minions.add(raw['data']['id']) # update this minion's timeout, as long as the job is still running minion_timeouts[raw['data']['id']] = time.time() + timeout # a minion returned, so we know its running somewhere minions_running = True # if we have hit gather_job_timeout (after firing the job) AND # if we have hit all minion timeouts, lets call it now = time.time() # if we have finished waiting, and no minions are running the job # then we need to see if each minion has timedout done = (now > timeout_at) and not minions_running if done: # if all minions have timeod out for id_ in minions - found: if now < minion_timeouts[id_]: done = False break if done: break # don't spin if block: time.sleep(0.01) else: yield # If there are any remaining open events, clean them up. if open_jids: for jid in open_jids: self.event.unsubscribe(jid) if expect_minions: for minion in list((minions - found)): yield {minion: {'failed': True}} # Filter out any minions marked as missing for which we received # returns (prevents false events sent due to higher-level masters not # knowing about lower-level minions). 
missing -= found # Report on missing minions if missing: for minion in missing: yield {minion: {'failed': True}} def get_returns( self, jid, minions, timeout=None): ''' Get the returns for the command line interface via the event system ''' minions = set(minions) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout log.debug( 'get_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Master job cache returner [{0}] failed to verify jid. ' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: time_left = timeout_at - int(time.time()) wait = max(1, time_left) raw = self.event.get_event(wait, jid, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: found.add(raw['id']) ret[raw['id']] = raw['return'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break if int(time.time()) > timeout_at: log.info( 'jid %s minions %s did not return in time', jid, (minions - found) ) break time.sleep(0.01) return ret def get_full_returns(self, jid, minions, timeout=None): ''' This method starts off a watcher looking at the return data for a specified jid, it returns all of the information for the jid ''' # TODO: change this from ret to return... or the other way. 
# Its inconsistent, we should pick one ret = {} # create the iterator-- since we want to get anyone in the middle event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Returner {0} could not fetch jid data. ' 'Exception details: {1}'.format( self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # if we have all the minion returns, lets just return if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise lets use the listener we created above to get the rest for event_ret in event_iter: # if nothing in the event_ret, skip if event_ret == {}: time.sleep(0.02) continue for minion, m_data in six.iteritems(event_ret): if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # are we done yet? if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise we hit the timeout, return what we have return ret def get_cache_returns(self, jid): ''' Execute a single pass to gather the contents of the job cache ''' ret = {} try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Could not examine master job cache. ' 'Error occurred in {0} returner. 
' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data return ret def get_cli_static_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_timeout=False, show_jid=False): ''' Get the returns for the command line interface via the event system ''' log.trace('entered - function get_cli_static_event_returns()') minions = set(minions) if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Load could not be retrieved from ' 'returner {0}. 
Exception details: {1}'.format( self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: # Process events until timeout is reached or all minions have returned time_left = timeout_at - int(time.time()) # Wait 0 == forever, use a minimum of 1s wait = max(1, time_left) jid_tag = 'salt/job/{0}'.format(jid) raw = self.event.get_event(wait, jid_tag, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue found.add(raw['id']) ret[raw['id']] = {'ret': raw['return']} ret[raw['id']]['success'] = raw.get('success', False) if 'out' in raw: ret[raw['id']]['out'] = raw['out'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break if int(time.time()) > timeout_at: if verbose or show_timeout: if self.opts.get('minion_data_cache', False) \ or tgt_type in ('glob', 'pcre', 'list'): if len(found) < len(minions): fail = sorted(list(minions.difference(found))) for minion in fail: ret[minion] = { 'out': 'no_return', 'ret': 'Minion did not return' } break time.sleep(0.01) self._clean_up_subscriptions(jid) return ret def get_cli_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, progress=False, show_timeout=False, show_jid=False, **kwargs): ''' Get the returns for the command line interface via the event system ''' log.trace('func get_cli_event_returns()') if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) # lazy load the connected minions connected_minions = None return_count = 0 for ret in self.get_iter_returns(jid, minions, timeout=timeout, tgt=tgt, tgt_type=tgt_type, # (gtmanfred) 
expect_minions is popped here incase it is passed from a client # call. If this is not popped, then it would be passed twice to # get_iter_returns. expect_minions=(kwargs.pop('expect_minions', False) or verbose or show_timeout), **kwargs ): log.debug('return event: %s', ret) return_count = return_count + 1 if progress: for id_, min_ret in six.iteritems(ret): if not min_ret.get('failed') is True: yield {'minion_count': len(minions), 'return_count': return_count} # replace the return structure for missing minions for id_, min_ret in six.iteritems(ret): if min_ret.get('failed') is True: if connected_minions is None: connected_minions = salt.utils.minions.CkMinions(self.opts).connected_ids() if self.opts['minion_data_cache'] \ and salt.cache.factory(self.opts).contains('minions/{0}'.format(id_), 'data') \ and connected_minions \ and id_ not in connected_minions: yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [Not connected]', 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: # don't report syndics as unresponsive minions if not os.path.exists(os.path.join(self.opts['syndic_dir'], id_)): yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [No response]' '\nThe minions may not have all finished running and any ' 'remaining minions will return upon completion. To look ' 'up the return data for this job later, run the following ' 'command:\n\n' 'salt-run jobs.lookup_jid {0}'.format(jid), 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: yield {id_: min_ret} self._clean_up_subscriptions(jid) def get_event_iter_returns(self, jid, minions, timeout=None): ''' Gather the return data from the event system, break hard when timeout is reached. 
''' log.trace('entered - function get_event_iter_returns()') if timeout is None: timeout = self.opts['timeout'] timeout_at = time.time() + timeout found = set() # Check to see if the jid is real, if not return the empty dict if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() # Wait for the hosts to check in while True: raw = self.event.get_event(timeout, auto_reconnect=self.auto_reconnect) if raw is None or time.time() > timeout_at: # Timeout reached break if 'minions' in raw.get('data', {}): continue try: found.add(raw['id']) ret = {raw['id']: {'ret': raw['return']}} except KeyError: # Ignore other erroneous messages continue if 'out' in raw: ret[raw['id']]['out'] = raw['out'] yield ret time.sleep(0.02) def _prep_pub(self, tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs): ''' Set up the payload_kwargs to be sent down to the master ''' if tgt_type == 'nodegroup': if tgt not in self.opts['nodegroups']: conf_file = self.opts.get( 'conf_file', 'the master config file' ) raise SaltInvocationError( 'Node group {0} unavailable in {1}'.format( tgt, conf_file ) ) tgt = salt.utils.minions.nodegroup_comp(tgt, self.opts['nodegroups']) tgt_type = 'compound' # Convert a range expression to a list of nodes and change expression # form to list if tgt_type == 'range' and HAS_RANGE: tgt = self._convert_range_to_list(tgt) tgt_type = 'list' # If an external job cache is specified add it to the ret list if self.opts.get('ext_job_cache'): if ret: ret += ',{0}'.format(self.opts['ext_job_cache']) else: ret = self.opts['ext_job_cache'] # format the payload - make a function that does this in the payload # module # Generate the standard keyword args to feed to format_payload payload_kwargs = {'cmd': 'publish', 'tgt': tgt, 'fun': fun, 'arg': arg, 'key': self.key, 'tgt_type': tgt_type, 'ret': ret, 'jid': jid} # if kwargs are passed, pack them. 
if kwargs: payload_kwargs['kwargs'] = kwargs # If we have a salt user, add it to the payload if self.opts['syndic_master'] and 'user' in kwargs: payload_kwargs['user'] = kwargs['user'] elif self.salt_user: payload_kwargs['user'] = self.salt_user # If we're a syndication master, pass the timeout if self.opts['order_masters']: payload_kwargs['to'] = timeout return payload_kwargs def pub(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, listen=False, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.ReqChannel.factory(self.opts, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. 
# If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError as err: log.error(err) raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: return payload self.key = key payload_kwargs['key'] = self.key payload = channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: return payload # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() return {'jid': payload['load']['jid'], 'minions': payload['load']['minions']} @tornado.gen.coroutine def pub_async(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, io_loop=None, listen=True, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. 
fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.AsyncReqChannel.factory(self.opts, io_loop=io_loop, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. # If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = yield channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError: raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! 
Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: raise tornado.gen.Return(payload) self.key = key payload_kwargs['key'] = self.key payload = yield channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: raise tornado.gen.Return(payload) # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() raise tornado.gen.Return({'jid': payload['load']['jid'], 'minions': payload['load']['minions']}) def __del__(self): # This IS really necessary! # When running tests, if self.events is not destroyed, we leak 2 # threads per test case which uses self.client if hasattr(self, 'event'): # The call below will take care of calling 'self.event.destroy()' del self.event def _clean_up_subscriptions(self, job_id): if self.opts.get('order_masters'): self.event.unsubscribe('syndic/.*/{0}'.format(job_id), 'regex') self.event.unsubscribe('salt/job/{0}'.format(job_id))
saltstack/salt
salt/client/__init__.py
LocalClient.gather_job_info
python
def gather_job_info(self, jid, tgt, tgt_type, listen=True, **kwargs): ''' Return the information about a given job ''' log.debug('Checking whether jid %s is still running', jid) timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) pub_data = self.run_job(tgt, 'saltutil.find_job', arg=[jid], tgt_type=tgt_type, timeout=timeout, listen=listen, **kwargs ) if 'jid' in pub_data: self.event.subscribe(pub_data['jid']) return pub_data
Return the information about a given job
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L229-L248
[ "def run_job(\n self,\n tgt,\n fun,\n arg=(),\n tgt_type='glob',\n ret='',\n timeout=None,\n jid='',\n kwarg=None,\n listen=False,\n **kwargs):\n '''\n Asynchronously send a command to connected minions\n\n Prep the job directory and publish a command to any targeted minions.\n\n :return: A dictionary of (validated) ``pub_data`` or an empty\n dictionary on failure. The ``pub_data`` contains the job ID and a\n list of all minions that are expected to return data.\n\n .. code-block:: python\n\n >>> local.run_job('*', 'test.sleep', [300])\n {'jid': '20131219215650131543', 'minions': ['jerry']}\n '''\n arg = salt.utils.args.parse_input(arg, kwargs=kwarg)\n\n try:\n pub_data = self.pub(\n tgt,\n fun,\n arg,\n tgt_type,\n ret,\n jid=jid,\n timeout=self._get_timeout(timeout),\n listen=listen,\n **kwargs)\n except SaltClientError:\n # Re-raise error with specific message\n raise SaltClientError(\n 'The salt master could not be contacted. Is master running?'\n )\n except AuthenticationError as err:\n raise AuthenticationError(err)\n except AuthorizationError as err:\n raise AuthorizationError(err)\n except Exception as general_exception:\n # Convert to generic client error and pass along message\n raise SaltClientError(general_exception)\n\n return self._check_pub_data(pub_data, listen=listen)\n" ]
class LocalClient(object): ''' The interface used by the :command:`salt` CLI tool on the Salt Master ``LocalClient`` is used to send a command to Salt minions to execute :ref:`execution modules <all-salt.modules>` and return the results to the Salt Master. Importing and using ``LocalClient`` must be done on the same machine as the Salt Master and it must be done using the same user that the Salt Master is running as. (Unless :conf_master:`external_auth` is configured and authentication credentials are included in the execution). .. note:: The LocalClient uses a Tornado IOLoop, this can create issues when using the LocalClient inside an existing IOLoop. If creating the LocalClient in partnership with another IOLoop either create the IOLoop before creating the LocalClient, or when creating the IOLoop use ioloop.current() which will return the ioloop created by LocalClient. .. code-block:: python import salt.client local = salt.client.LocalClient() local.cmd('*', 'test.fib', [10]) ''' def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, 'master'), mopts=None, skip_perm_errors=False, io_loop=None, keep_loop=False, auto_reconnect=False): ''' :param IOLoop io_loop: io_loop used for events. Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. 
''' if mopts: self.opts = mopts else: if os.path.isdir(c_path): log.warning( '%s expects a file path not a directory path(%s) to ' 'its \'c_path\' keyword argument', self.__class__.__name__, c_path ) self.opts = salt.config.client_config(c_path) self.serial = salt.payload.Serial(self.opts) self.salt_user = salt.utils.user.get_specific_user() self.skip_perm_errors = skip_perm_errors self.key = self.__read_master_key() self.auto_reconnect = auto_reconnect self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False, io_loop=io_loop, keep_loop=keep_loop) self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils) self.returners = salt.loader.returners(self.opts, self.functions) def __read_master_key(self): ''' Read in the rotating master authentication key ''' key_user = self.salt_user if key_user == 'root': if self.opts.get('user', 'root') != 'root': key_user = self.opts.get('user', 'root') if key_user.startswith('sudo_'): key_user = self.opts.get('user', 'root') if salt.utils.platform.is_windows(): # The username may contain '\' if it is in Windows # 'DOMAIN\username' format. Fix this for the keyfile path. 
key_user = key_user.replace('\\', '_') keyfile = os.path.join(self.opts['cachedir'], '.{0}_key'.format(key_user)) try: # Make sure all key parent directories are accessible salt.utils.verify.check_path_traversal(self.opts['cachedir'], key_user, self.skip_perm_errors) with salt.utils.files.fopen(keyfile, 'r') as key: return salt.utils.stringutils.to_unicode(key.read()) except (OSError, IOError, SaltClientError): # Fall back to eauth return '' def _convert_range_to_list(self, tgt): ''' convert a seco.range range into a list target ''' range_ = seco.range.Range(self.opts['range_server']) try: return range_.expand(tgt) except seco.range.RangeException as err: print('Range server exception: {0}'.format(err)) return [] def _get_timeout(self, timeout): ''' Return the timeout to use ''' if timeout is None: return self.opts['timeout'] if isinstance(timeout, int): return timeout if isinstance(timeout, six.string_types): try: return int(timeout) except ValueError: return self.opts['timeout'] # Looks like the timeout is invalid, use config return self.opts['timeout'] def _check_pub_data(self, pub_data, listen=True): ''' Common checks on the pub_data data structure returned from running pub ''' if pub_data == '': # Failed to authenticate, this could be a bunch of things raise EauthAuthenticationError( 'Failed to authenticate! This is most likely because this ' 'user is not permitted to execute commands, but there is a ' 'small possibility that a disk error occurred (check ' 'disk/inode usage).' 
) # Failed to connect to the master and send the pub if 'error' in pub_data: print(pub_data['error']) log.debug('_check_pub_data() error: %s', pub_data['error']) return {} elif 'jid' not in pub_data: return {} if pub_data['jid'] == '0': print('Failed to connect to the Master, ' 'is the Salt Master running?') return {} # If we order masters (via a syndic), don't short circuit if no minions # are found if not self.opts.get('order_masters'): # Check for no minions if not pub_data['minions']: print('No minions matched the target. ' 'No command was sent, no jid was assigned.') return {} # don't install event subscription listeners when the request is asynchronous # and doesn't care. this is important as it will create event leaks otherwise if not listen: return pub_data if self.opts.get('order_masters'): self.event.subscribe('syndic/.*/{0}'.format(pub_data['jid']), 'regex') self.event.subscribe('salt/job/{0}'.format(pub_data['jid'])) return pub_data def run_job( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=False, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = self.pub( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) return self._check_pub_data(pub_data, listen=listen) def gather_minions(self, tgt, expr_form): _res = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form) return _res['minions'] @tornado.gen.coroutine def run_job_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=True, io_loop=None, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job_async('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = yield self.pub_async( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), io_loop=io_loop, listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) raise tornado.gen.Return(self._check_pub_data(pub_data, listen=listen)) def cmd_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', kwarg=None, **kwargs): ''' Asynchronously send a command to connected minions The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A job ID or 0 on failure. .. code-block:: python >>> local.cmd_async('*', 'test.sleep', [300]) '20131219215921857715' ''' pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, jid=jid, kwarg=kwarg, listen=False, **kwargs) try: return pub_data['jid'] except KeyError: return 0 def cmd_subset( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, sub=3, cli=False, progress=False, full_return=False, **kwargs): ''' Execute a command on a random subset of the targeted systems The function signature is the same as :py:meth:`cmd` with the following exceptions. :param sub: The number of systems to execute on :param cli: When this is set to True, a generator is returned, otherwise a dictionary of the minion returns is returned .. 
code-block:: python >>> SLC.cmd_subset('*', 'test.ping', sub=1) {'jerry': True} ''' minion_ret = self.cmd(tgt, 'sys.list_functions', tgt_type=tgt_type, **kwargs) minions = list(minion_ret) random.shuffle(minions) f_tgt = [] for minion in minions: if fun in minion_ret[minion]: f_tgt.append(minion) if len(f_tgt) >= sub: break func = self.cmd if cli: func = self.cmd_cli return func( f_tgt, fun, arg, tgt_type='list', ret=ret, kwarg=kwarg, progress=progress, full_return=full_return, **kwargs) def cmd_batch( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, batch='10%', **kwargs): ''' Iteratively execute a command on subsets of minions at a time The function signature is the same as :py:meth:`cmd` with the following exceptions. :param batch: The batch identifier of systems to execute on :returns: A generator of minion returns .. code-block:: python >>> returns = local.cmd_batch('*', 'state.highstate', batch='10%') >>> for ret in returns: ... print(ret) {'jerry': {...}} {'dave': {...}} {'stewart': {...}} ''' # Late import - not used anywhere else in this file import salt.cli.batch opts = salt.cli.batch.batch_get_opts( tgt, fun, batch, self.opts, arg=arg, tgt_type=tgt_type, ret=ret, kwarg=kwarg, **kwargs) eauth = salt.cli.batch.batch_get_eauth(kwargs) arg = salt.utils.args.parse_input(arg, kwargs=kwarg) opts = {'tgt': tgt, 'fun': fun, 'arg': arg, 'tgt_type': tgt_type, 'ret': ret, 'batch': batch, 'failhard': kwargs.get('failhard', False), 'raw': kwargs.get('raw', False)} if 'timeout' in kwargs: opts['timeout'] = kwargs['timeout'] if 'gather_job_timeout' in kwargs: opts['gather_job_timeout'] = kwargs['gather_job_timeout'] if 'batch_wait' in kwargs: opts['batch_wait'] = int(kwargs['batch_wait']) eauth = {} if 'eauth' in kwargs: eauth['eauth'] = kwargs.pop('eauth') if 'username' in kwargs: eauth['username'] = kwargs.pop('username') if 'password' in kwargs: eauth['password'] = kwargs.pop('password') if 'token' in kwargs: eauth['token'] = kwargs.pop('token') for key, 
val in six.iteritems(self.opts): if key not in opts: opts[key] = val batch = salt.cli.batch.Batch(opts, eauth=eauth, quiet=True) for ret in batch.run(): yield ret def cmd(self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', jid='', full_return=False, kwarg=None, **kwargs): ''' Synchronously execute a command on targeted minions The cmd method will execute and wait for the timeout period for all minions to reply, then it will return all minion data at once. .. code-block:: python >>> import salt.client >>> local = salt.client.LocalClient() >>> local.cmd('*', 'cmd.run', ['whoami']) {'jerry': 'root'} With extra keyword arguments for the command function to be run: .. code-block:: python local.cmd('*', 'test.arg', ['arg1', 'arg2'], kwarg={'foo': 'bar'}) Compound commands can be used for multiple executions in a single publish. Function names and function arguments are provided in separate lists but the index values must correlate and an empty list must be used if no arguments are required. .. code-block:: python >>> local.cmd('*', [ 'grains.items', 'sys.doc', 'cmd.run', ], [ [], [], ['uptime'], ]) :param tgt: Which minions to target for the execution. Default is shell glob. Modified by the ``tgt_type`` option. :type tgt: string or list :param fun: The module and function to call on the specified minions of the form ``module.function``. For example ``test.ping`` or ``grains.items``. Compound commands Multiple functions may be called in a single publish by passing a list of commands. This can dramatically lower overhead and speed up the application communicating with Salt. This requires that the ``arg`` param is a list of lists. The ``fun`` list and the ``arg`` list must correlate by index meaning a function that does not take arguments must still have a corresponding empty list at the expected index. :type fun: string or list of strings :param arg: A list of arguments to pass to the remote function. 
If the function takes no arguments ``arg`` may be omitted except when executing a compound command. :type arg: list or list-of-lists :param timeout: Seconds to wait after the last minion returns but before all minions return. :param tgt_type: The type of ``tgt``. Allowed values: * ``glob`` - Bash glob completion - Default * ``pcre`` - Perl style regular expression * ``list`` - Python list of hosts * ``grain`` - Match based on a grain comparison * ``grain_pcre`` - Grain comparison with a regex * ``pillar`` - Pillar data comparison * ``pillar_pcre`` - Pillar data comparison with a regex * ``nodegroup`` - Match on nodegroup * ``range`` - Use a Range server for matching * ``compound`` - Pass a compound match string * ``ipcidr`` - Match based on Subnet (CIDR notation) or IPv4 address. .. versionchanged:: 2017.7.0 Renamed from ``expr_form`` to ``tgt_type`` :param ret: The returner to use. The value passed can be single returner, or a comma delimited list of returners to call in order on the minions :param kwarg: A dictionary with keyword arguments for the function. :param full_return: Output the job return only (default) or the full return including exit code and other job metadata. :param kwargs: Optional keyword arguments. Authentication credentials may be passed when using :conf_master:`external_auth`. For example: ``local.cmd('*', 'test.ping', username='saltdev', password='saltdev', eauth='pam')``. Or: ``local.cmd('*', 'test.ping', token='5871821ea51754fdcea8153c1c745433')`` :returns: A dictionary with the result of the execution, keyed by minion ID. A compound command will return a sub-dictionary keyed by function name. 
''' was_listening = self.event.cpub try: pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, timeout, jid, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data ret = {} for fn_ret in self.get_cli_event_returns( pub_data['jid'], pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, **kwargs): if fn_ret: for mid, data in six.iteritems(fn_ret): ret[mid] = (data if full_return else data.get('ret', {})) for failed in list(set(pub_data['minions']) - set(ret)): ret[failed] = False return ret finally: if not was_listening: self.event.close_pub() def cmd_cli( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, progress=False, **kwargs): ''' Used by the :command:`salt` CLI. This method returns minion returns as they come back and attempts to block until all minions return. The function signature is the same as :py:meth:`cmd` with the following exceptions. :param verbose: Print extra information about the running command :returns: A generator ''' was_listening = self.event.cpub if fun.startswith('state.'): ref = {'compound': '-C', 'glob': '', 'grain': '-G', 'grain_pcre': '-P', 'ipcidr': '-S', 'list': '-L', 'nodegroup': '-N', 'pcre': '-E', 'pillar': '-I', 'pillar_pcre': '-J'} if HAS_RANGE: ref['range'] = '-R' if ref[tgt_type].startswith('-'): self.target_data = "{0} '{1}'".format( ref[tgt_type], ','.join(tgt) if isinstance(tgt, list) else tgt) else: self.target_data = ','.join(tgt) if isinstance(tgt, list) else tgt else: self.target_data = '' try: self.pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not self.pub_data: yield self.pub_data else: try: for fn_ret in self.get_cli_event_returns( self.pub_data['jid'], self.pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, verbose, progress, **kwargs): if not fn_ret: continue yield fn_ret except KeyboardInterrupt: exit_msg = ( '\nExiting gracefully on Ctrl-c' '\n' 'This job\'s jid is: {0}\n' 'The 
minions may not have all finished running and any ' 'remaining minions will return upon completion.\n\n' 'To look up the return data for this job later, run the ' 'following command:\n' 'salt-run jobs.lookup_jid {0}'.format(self.pub_data['jid'])) if self.target_data: exit_msg += ( '\n\n' 'To set up the state run to safely exit, run the following command:\n' 'salt {0} state.soft_kill {1}'.format(self.target_data, self.pub_data['jid'])) raise SystemExit(exit_msg) finally: if not was_listening: self.event.close_pub() def cmd_iter( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, **kwargs): ''' Yields the individual minion returns as they come in The function signature is the same as :py:meth:`cmd` with the following exceptions. Normally :py:meth:`cmd_iter` does not yield results for minions that are not connected. If you want it to return results for disconnected minions set `expect_minions=True` in `kwargs`. :return: A generator yielding the individual minion returns .. code-block:: python >>> ret = local.cmd_iter('*', 'test.ping') >>> for i in ret: ... print(i) {'jerry': {'ret': True}} {'dave': {'ret': True}} {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: if kwargs.get('yield_pub_data'): yield pub_data for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=self._get_timeout(timeout), tgt=tgt, tgt_type=tgt_type, **kwargs): if not fn_ret: continue yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_iter_no_block( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, show_jid=False, verbose=False, **kwargs): ''' Yields the individual minion returns as they come in, or None when no returns are available. 
The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A generator yielding the individual minion returns, or None when no returns are available. This allows for actions to be injected in between minion returns. .. code-block:: python >>> ret = local.cmd_iter_no_block('*', 'test.ping') >>> for i in ret: ... print(i) None {'jerry': {'ret': True}} {'dave': {'ret': True}} None {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=timeout, tgt=tgt, tgt_type=tgt_type, block=False, **kwargs): if fn_ret and any([show_jid, verbose]): for minion in fn_ret: fn_ret[minion]['jid'] = pub_data['jid'] yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_full_return( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, **kwargs): ''' Execute a salt command and return ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data return (self.get_cli_static_event_returns(pub_data['jid'], pub_data['minions'], timeout, tgt, tgt_type, verbose)) finally: if not was_listening: self.event.close_pub() def get_cli_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_jid=False, **kwargs): ''' Starts a watcher looking at the return data for a specified JID :returns: all of the information for the JID ''' if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] fret = {} # make sure the minions is a set (since we do set operations on it) 
minions = set(minions) found = set() # start this before the cache lookup-- in case new stuff comes in event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) # get the info from the cache ret = self.get_cache_returns(jid) if ret != {}: found.update(set(ret)) yield ret # if you have all the returns, stop if len(found.intersection(minions)) >= len(minions): raise StopIteration() # otherwise, get them from the event system for event in event_iter: if event != {}: found.update(set(event)) yield event if len(found.intersection(minions)) >= len(minions): self._clean_up_subscriptions(jid) raise StopIteration() # TODO: tests!! def get_returns_no_block( self, tag, match_type=None): ''' Raw function to just return events of jid excluding timeout logic Yield either the raw event data or None Pass a list of additional regular expressions as `tags_regex` to search the event bus for non-return data, such as minion lists returned from syndics. ''' while True: raw = self.event.get_event(wait=0.01, tag=tag, match_type=match_type, full=True, no_block=True, auto_reconnect=self.auto_reconnect) yield raw def get_iter_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', expect_minions=False, block=True, **kwargs): ''' Watch the event system and return job data as it comes in :returns: all of the information for the JID ''' if not isinstance(minions, set): if isinstance(minions, six.string_types): minions = set([minions]) elif isinstance(minions, (list, tuple)): minions = set(list(minions)) if timeout is None: timeout = self.opts['timeout'] gather_job_timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) start = int(time.time()) # timeouts per minion, id_ -> timeout time minion_timeouts = {} found = set() missing = set() # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the 
iteration, since the jid is invalid raise StopIteration() except Exception as exc: log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG) # Wait for the hosts to check in last_time = False # iterator for this job's return if self.opts['order_masters']: # If we are a MoM, we need to gather expected minions from downstreams masters. ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex') else: ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid)) # iterator for the info of this job jinfo_iter = [] # open event jids that need to be un-subscribed from later open_jids = set() timeout_at = time.time() + timeout gather_syndic_wait = time.time() + self.opts['syndic_wait'] # are there still minions running the job out there # start as True so that we ping at least once minions_running = True log.debug( 'get_iter_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) while True: # Process events until timeout is reached or all minions have returned for raw in ret_iter: # if we got None, then there were no events if raw is None: break if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) if 'missing' in raw.get('data', {}): missing.update(raw['data']['missing']) continue if 'return' not in raw['data']: continue if kwargs.get('raw', False): found.add(raw['data']['id']) yield raw else: found.add(raw['data']['id']) ret = {raw['data']['id']: {'ret': raw['data']['return']}} if 'out' in raw['data']: ret[raw['data']['id']]['out'] = raw['data']['out'] if 'retcode' in raw['data']: ret[raw['data']['id']]['retcode'] = raw['data']['retcode'] if 'jid' in raw['data']: ret[raw['data']['id']]['jid'] = raw['data']['jid'] if kwargs.get('_cmd_meta', False): ret[raw['data']['id']].update(raw['data']) log.debug('jid %s return from %s', jid, raw['data']['id']) yield ret # if we have all of the returns (and we aren't a syndic), no need for anything fancy if 
len(found.intersection(minions)) >= len(minions) and not self.opts['order_masters']: # All minions have returned, break out of the loop log.debug('jid %s found all minions %s', jid, found) break elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']: if len(found) >= len(minions) and len(minions) > 0 and time.time() > gather_syndic_wait: # There were some minions to find and we found them # However, this does not imply that *all* masters have yet responded with expected minion lists. # Therefore, continue to wait up to the syndic_wait period (calculated in gather_syndic_wait) to see # if additional lower-level masters deliver their lists of expected # minions. break # If we get here we may not have gathered the minion list yet. Keep waiting # for all lower-level masters to respond with their minion lists # let start the timeouts for all remaining minions for id_ in minions - found: # if we have a new minion in the list, make sure it has a timeout if id_ not in minion_timeouts: minion_timeouts[id_] = time.time() + timeout # if the jinfo has timed out and some minions are still running the job # re-do the ping if time.time() > timeout_at and minions_running: # since this is a new ping, no one has responded yet jinfo = self.gather_job_info(jid, list(minions - found), 'list', **kwargs) minions_running = False # if we weren't assigned any jid that means the master thinks # we have nothing to send if 'jid' not in jinfo: jinfo_iter = [] else: jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid'])) timeout_at = time.time() + gather_job_timeout # if you are a syndic, wait a little longer if self.opts['order_masters']: timeout_at += self.opts.get('syndic_wait', 1) # check for minions that are running the job still for raw in jinfo_iter: # if there are no more events, lets stop waiting for the jinfo if raw is None: break try: if raw['data']['retcode'] > 0: log.error('saltutil returning errors on minion %s', raw['data']['id']) 
minions.remove(raw['data']['id']) break except KeyError as exc: # This is a safe pass. We're just using the try/except to # avoid having to deep-check for keys. missing_key = exc.__str__().strip('\'"') if missing_key == 'retcode': log.debug('retcode missing from client return') else: log.debug( 'Passing on saltutil error. Key \'%s\' missing ' 'from client return. This may be an error in ' 'the client.', missing_key ) # Keep track of the jid events to unsubscribe from later open_jids.add(jinfo['jid']) # TODO: move to a library?? if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue if 'syndic' in raw.get('data', {}): minions.update(raw['syndic']) continue if 'return' not in raw.get('data', {}): continue # if the job isn't running there anymore... don't count if raw['data']['return'] == {}: continue # if the minion throws an exception containing the word "return" # the master will try to handle the string as a dict in the next # step. Check if we have a string, log the issue and continue. 
if isinstance(raw['data']['return'], six.string_types): log.error("unexpected return from minion: %s", raw) continue if 'return' in raw['data']['return'] and \ raw['data']['return']['return'] == {}: continue # if we didn't originally target the minion, lets add it to the list if raw['data']['id'] not in minions: minions.add(raw['data']['id']) # update this minion's timeout, as long as the job is still running minion_timeouts[raw['data']['id']] = time.time() + timeout # a minion returned, so we know its running somewhere minions_running = True # if we have hit gather_job_timeout (after firing the job) AND # if we have hit all minion timeouts, lets call it now = time.time() # if we have finished waiting, and no minions are running the job # then we need to see if each minion has timedout done = (now > timeout_at) and not minions_running if done: # if all minions have timeod out for id_ in minions - found: if now < minion_timeouts[id_]: done = False break if done: break # don't spin if block: time.sleep(0.01) else: yield # If there are any remaining open events, clean them up. if open_jids: for jid in open_jids: self.event.unsubscribe(jid) if expect_minions: for minion in list((minions - found)): yield {minion: {'failed': True}} # Filter out any minions marked as missing for which we received # returns (prevents false events sent due to higher-level masters not # knowing about lower-level minions). 
missing -= found # Report on missing minions if missing: for minion in missing: yield {minion: {'failed': True}} def get_returns( self, jid, minions, timeout=None): ''' Get the returns for the command line interface via the event system ''' minions = set(minions) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout log.debug( 'get_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Master job cache returner [{0}] failed to verify jid. ' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: time_left = timeout_at - int(time.time()) wait = max(1, time_left) raw = self.event.get_event(wait, jid, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: found.add(raw['id']) ret[raw['id']] = raw['return'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break if int(time.time()) > timeout_at: log.info( 'jid %s minions %s did not return in time', jid, (minions - found) ) break time.sleep(0.01) return ret def get_full_returns(self, jid, minions, timeout=None): ''' This method starts off a watcher looking at the return data for a specified jid, it returns all of the information for the jid ''' # TODO: change this from ret to return... or the other way. 
# Its inconsistent, we should pick one ret = {} # create the iterator-- since we want to get anyone in the middle event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Returner {0} could not fetch jid data. ' 'Exception details: {1}'.format( self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # if we have all the minion returns, lets just return if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise lets use the listener we created above to get the rest for event_ret in event_iter: # if nothing in the event_ret, skip if event_ret == {}: time.sleep(0.02) continue for minion, m_data in six.iteritems(event_ret): if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # are we done yet? if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise we hit the timeout, return what we have return ret def get_cache_returns(self, jid): ''' Execute a single pass to gather the contents of the job cache ''' ret = {} try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Could not examine master job cache. ' 'Error occurred in {0} returner. 
' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data return ret def get_cli_static_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_timeout=False, show_jid=False): ''' Get the returns for the command line interface via the event system ''' log.trace('entered - function get_cli_static_event_returns()') minions = set(minions) if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Load could not be retrieved from ' 'returner {0}. 
Exception details: {1}'.format( self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: # Process events until timeout is reached or all minions have returned time_left = timeout_at - int(time.time()) # Wait 0 == forever, use a minimum of 1s wait = max(1, time_left) jid_tag = 'salt/job/{0}'.format(jid) raw = self.event.get_event(wait, jid_tag, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue found.add(raw['id']) ret[raw['id']] = {'ret': raw['return']} ret[raw['id']]['success'] = raw.get('success', False) if 'out' in raw: ret[raw['id']]['out'] = raw['out'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break if int(time.time()) > timeout_at: if verbose or show_timeout: if self.opts.get('minion_data_cache', False) \ or tgt_type in ('glob', 'pcre', 'list'): if len(found) < len(minions): fail = sorted(list(minions.difference(found))) for minion in fail: ret[minion] = { 'out': 'no_return', 'ret': 'Minion did not return' } break time.sleep(0.01) self._clean_up_subscriptions(jid) return ret def get_cli_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, progress=False, show_timeout=False, show_jid=False, **kwargs): ''' Get the returns for the command line interface via the event system ''' log.trace('func get_cli_event_returns()') if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) # lazy load the connected minions connected_minions = None return_count = 0 for ret in self.get_iter_returns(jid, minions, timeout=timeout, tgt=tgt, tgt_type=tgt_type, # (gtmanfred) 
expect_minions is popped here incase it is passed from a client # call. If this is not popped, then it would be passed twice to # get_iter_returns. expect_minions=(kwargs.pop('expect_minions', False) or verbose or show_timeout), **kwargs ): log.debug('return event: %s', ret) return_count = return_count + 1 if progress: for id_, min_ret in six.iteritems(ret): if not min_ret.get('failed') is True: yield {'minion_count': len(minions), 'return_count': return_count} # replace the return structure for missing minions for id_, min_ret in six.iteritems(ret): if min_ret.get('failed') is True: if connected_minions is None: connected_minions = salt.utils.minions.CkMinions(self.opts).connected_ids() if self.opts['minion_data_cache'] \ and salt.cache.factory(self.opts).contains('minions/{0}'.format(id_), 'data') \ and connected_minions \ and id_ not in connected_minions: yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [Not connected]', 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: # don't report syndics as unresponsive minions if not os.path.exists(os.path.join(self.opts['syndic_dir'], id_)): yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [No response]' '\nThe minions may not have all finished running and any ' 'remaining minions will return upon completion. To look ' 'up the return data for this job later, run the following ' 'command:\n\n' 'salt-run jobs.lookup_jid {0}'.format(jid), 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: yield {id_: min_ret} self._clean_up_subscriptions(jid) def get_event_iter_returns(self, jid, minions, timeout=None): ''' Gather the return data from the event system, break hard when timeout is reached. 
''' log.trace('entered - function get_event_iter_returns()') if timeout is None: timeout = self.opts['timeout'] timeout_at = time.time() + timeout found = set() # Check to see if the jid is real, if not return the empty dict if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() # Wait for the hosts to check in while True: raw = self.event.get_event(timeout, auto_reconnect=self.auto_reconnect) if raw is None or time.time() > timeout_at: # Timeout reached break if 'minions' in raw.get('data', {}): continue try: found.add(raw['id']) ret = {raw['id']: {'ret': raw['return']}} except KeyError: # Ignore other erroneous messages continue if 'out' in raw: ret[raw['id']]['out'] = raw['out'] yield ret time.sleep(0.02) def _prep_pub(self, tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs): ''' Set up the payload_kwargs to be sent down to the master ''' if tgt_type == 'nodegroup': if tgt not in self.opts['nodegroups']: conf_file = self.opts.get( 'conf_file', 'the master config file' ) raise SaltInvocationError( 'Node group {0} unavailable in {1}'.format( tgt, conf_file ) ) tgt = salt.utils.minions.nodegroup_comp(tgt, self.opts['nodegroups']) tgt_type = 'compound' # Convert a range expression to a list of nodes and change expression # form to list if tgt_type == 'range' and HAS_RANGE: tgt = self._convert_range_to_list(tgt) tgt_type = 'list' # If an external job cache is specified add it to the ret list if self.opts.get('ext_job_cache'): if ret: ret += ',{0}'.format(self.opts['ext_job_cache']) else: ret = self.opts['ext_job_cache'] # format the payload - make a function that does this in the payload # module # Generate the standard keyword args to feed to format_payload payload_kwargs = {'cmd': 'publish', 'tgt': tgt, 'fun': fun, 'arg': arg, 'key': self.key, 'tgt_type': tgt_type, 'ret': ret, 'jid': jid} # if kwargs are passed, pack them. 
if kwargs: payload_kwargs['kwargs'] = kwargs # If we have a salt user, add it to the payload if self.opts['syndic_master'] and 'user' in kwargs: payload_kwargs['user'] = kwargs['user'] elif self.salt_user: payload_kwargs['user'] = self.salt_user # If we're a syndication master, pass the timeout if self.opts['order_masters']: payload_kwargs['to'] = timeout return payload_kwargs def pub(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, listen=False, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.ReqChannel.factory(self.opts, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. 
# If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError as err: log.error(err) raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: return payload self.key = key payload_kwargs['key'] = self.key payload = channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: return payload # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() return {'jid': payload['load']['jid'], 'minions': payload['load']['minions']} @tornado.gen.coroutine def pub_async(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, io_loop=None, listen=True, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. 
fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.AsyncReqChannel.factory(self.opts, io_loop=io_loop, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. # If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = yield channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError: raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! 
Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: raise tornado.gen.Return(payload) self.key = key payload_kwargs['key'] = self.key payload = yield channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: raise tornado.gen.Return(payload) # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() raise tornado.gen.Return({'jid': payload['load']['jid'], 'minions': payload['load']['minions']}) def __del__(self): # This IS really necessary! # When running tests, if self.events is not destroyed, we leak 2 # threads per test case which uses self.client if hasattr(self, 'event'): # The call below will take care of calling 'self.event.destroy()' del self.event def _clean_up_subscriptions(self, job_id): if self.opts.get('order_masters'): self.event.unsubscribe('syndic/.*/{0}'.format(job_id), 'regex') self.event.unsubscribe('salt/job/{0}'.format(job_id))
saltstack/salt
salt/client/__init__.py
LocalClient._check_pub_data
python
def _check_pub_data(self, pub_data, listen=True): ''' Common checks on the pub_data data structure returned from running pub ''' if pub_data == '': # Failed to authenticate, this could be a bunch of things raise EauthAuthenticationError( 'Failed to authenticate! This is most likely because this ' 'user is not permitted to execute commands, but there is a ' 'small possibility that a disk error occurred (check ' 'disk/inode usage).' ) # Failed to connect to the master and send the pub if 'error' in pub_data: print(pub_data['error']) log.debug('_check_pub_data() error: %s', pub_data['error']) return {} elif 'jid' not in pub_data: return {} if pub_data['jid'] == '0': print('Failed to connect to the Master, ' 'is the Salt Master running?') return {} # If we order masters (via a syndic), don't short circuit if no minions # are found if not self.opts.get('order_masters'): # Check for no minions if not pub_data['minions']: print('No minions matched the target. ' 'No command was sent, no jid was assigned.') return {} # don't install event subscription listeners when the request is asynchronous # and doesn't care. this is important as it will create event leaks otherwise if not listen: return pub_data if self.opts.get('order_masters'): self.event.subscribe('syndic/.*/{0}'.format(pub_data['jid']), 'regex') self.event.subscribe('salt/job/{0}'.format(pub_data['jid'])) return pub_data
Common checks on the pub_data data structure returned from running pub
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L250-L294
null
class LocalClient(object): ''' The interface used by the :command:`salt` CLI tool on the Salt Master ``LocalClient`` is used to send a command to Salt minions to execute :ref:`execution modules <all-salt.modules>` and return the results to the Salt Master. Importing and using ``LocalClient`` must be done on the same machine as the Salt Master and it must be done using the same user that the Salt Master is running as. (Unless :conf_master:`external_auth` is configured and authentication credentials are included in the execution). .. note:: The LocalClient uses a Tornado IOLoop, this can create issues when using the LocalClient inside an existing IOLoop. If creating the LocalClient in partnership with another IOLoop either create the IOLoop before creating the LocalClient, or when creating the IOLoop use ioloop.current() which will return the ioloop created by LocalClient. .. code-block:: python import salt.client local = salt.client.LocalClient() local.cmd('*', 'test.fib', [10]) ''' def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, 'master'), mopts=None, skip_perm_errors=False, io_loop=None, keep_loop=False, auto_reconnect=False): ''' :param IOLoop io_loop: io_loop used for events. Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. 
''' if mopts: self.opts = mopts else: if os.path.isdir(c_path): log.warning( '%s expects a file path not a directory path(%s) to ' 'its \'c_path\' keyword argument', self.__class__.__name__, c_path ) self.opts = salt.config.client_config(c_path) self.serial = salt.payload.Serial(self.opts) self.salt_user = salt.utils.user.get_specific_user() self.skip_perm_errors = skip_perm_errors self.key = self.__read_master_key() self.auto_reconnect = auto_reconnect self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False, io_loop=io_loop, keep_loop=keep_loop) self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils) self.returners = salt.loader.returners(self.opts, self.functions) def __read_master_key(self): ''' Read in the rotating master authentication key ''' key_user = self.salt_user if key_user == 'root': if self.opts.get('user', 'root') != 'root': key_user = self.opts.get('user', 'root') if key_user.startswith('sudo_'): key_user = self.opts.get('user', 'root') if salt.utils.platform.is_windows(): # The username may contain '\' if it is in Windows # 'DOMAIN\username' format. Fix this for the keyfile path. 
key_user = key_user.replace('\\', '_') keyfile = os.path.join(self.opts['cachedir'], '.{0}_key'.format(key_user)) try: # Make sure all key parent directories are accessible salt.utils.verify.check_path_traversal(self.opts['cachedir'], key_user, self.skip_perm_errors) with salt.utils.files.fopen(keyfile, 'r') as key: return salt.utils.stringutils.to_unicode(key.read()) except (OSError, IOError, SaltClientError): # Fall back to eauth return '' def _convert_range_to_list(self, tgt): ''' convert a seco.range range into a list target ''' range_ = seco.range.Range(self.opts['range_server']) try: return range_.expand(tgt) except seco.range.RangeException as err: print('Range server exception: {0}'.format(err)) return [] def _get_timeout(self, timeout): ''' Return the timeout to use ''' if timeout is None: return self.opts['timeout'] if isinstance(timeout, int): return timeout if isinstance(timeout, six.string_types): try: return int(timeout) except ValueError: return self.opts['timeout'] # Looks like the timeout is invalid, use config return self.opts['timeout'] def gather_job_info(self, jid, tgt, tgt_type, listen=True, **kwargs): ''' Return the information about a given job ''' log.debug('Checking whether jid %s is still running', jid) timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) pub_data = self.run_job(tgt, 'saltutil.find_job', arg=[jid], tgt_type=tgt_type, timeout=timeout, listen=listen, **kwargs ) if 'jid' in pub_data: self.event.subscribe(pub_data['jid']) return pub_data def run_job( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=False, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. 
code-block:: python >>> local.run_job('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = self.pub( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' ) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) return self._check_pub_data(pub_data, listen=listen) def gather_minions(self, tgt, expr_form): _res = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form) return _res['minions'] @tornado.gen.coroutine def run_job_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=True, io_loop=None, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job_async('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = yield self.pub_async( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), io_loop=io_loop, listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) raise tornado.gen.Return(self._check_pub_data(pub_data, listen=listen)) def cmd_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', kwarg=None, **kwargs): ''' Asynchronously send a command to connected minions The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A job ID or 0 on failure. .. code-block:: python >>> local.cmd_async('*', 'test.sleep', [300]) '20131219215921857715' ''' pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, jid=jid, kwarg=kwarg, listen=False, **kwargs) try: return pub_data['jid'] except KeyError: return 0 def cmd_subset( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, sub=3, cli=False, progress=False, full_return=False, **kwargs): ''' Execute a command on a random subset of the targeted systems The function signature is the same as :py:meth:`cmd` with the following exceptions. :param sub: The number of systems to execute on :param cli: When this is set to True, a generator is returned, otherwise a dictionary of the minion returns is returned .. 
code-block:: python >>> SLC.cmd_subset('*', 'test.ping', sub=1) {'jerry': True} ''' minion_ret = self.cmd(tgt, 'sys.list_functions', tgt_type=tgt_type, **kwargs) minions = list(minion_ret) random.shuffle(minions) f_tgt = [] for minion in minions: if fun in minion_ret[minion]: f_tgt.append(minion) if len(f_tgt) >= sub: break func = self.cmd if cli: func = self.cmd_cli return func( f_tgt, fun, arg, tgt_type='list', ret=ret, kwarg=kwarg, progress=progress, full_return=full_return, **kwargs) def cmd_batch( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, batch='10%', **kwargs): ''' Iteratively execute a command on subsets of minions at a time The function signature is the same as :py:meth:`cmd` with the following exceptions. :param batch: The batch identifier of systems to execute on :returns: A generator of minion returns .. code-block:: python >>> returns = local.cmd_batch('*', 'state.highstate', batch='10%') >>> for ret in returns: ... print(ret) {'jerry': {...}} {'dave': {...}} {'stewart': {...}} ''' # Late import - not used anywhere else in this file import salt.cli.batch opts = salt.cli.batch.batch_get_opts( tgt, fun, batch, self.opts, arg=arg, tgt_type=tgt_type, ret=ret, kwarg=kwarg, **kwargs) eauth = salt.cli.batch.batch_get_eauth(kwargs) arg = salt.utils.args.parse_input(arg, kwargs=kwarg) opts = {'tgt': tgt, 'fun': fun, 'arg': arg, 'tgt_type': tgt_type, 'ret': ret, 'batch': batch, 'failhard': kwargs.get('failhard', False), 'raw': kwargs.get('raw', False)} if 'timeout' in kwargs: opts['timeout'] = kwargs['timeout'] if 'gather_job_timeout' in kwargs: opts['gather_job_timeout'] = kwargs['gather_job_timeout'] if 'batch_wait' in kwargs: opts['batch_wait'] = int(kwargs['batch_wait']) eauth = {} if 'eauth' in kwargs: eauth['eauth'] = kwargs.pop('eauth') if 'username' in kwargs: eauth['username'] = kwargs.pop('username') if 'password' in kwargs: eauth['password'] = kwargs.pop('password') if 'token' in kwargs: eauth['token'] = kwargs.pop('token') for key, 
val in six.iteritems(self.opts): if key not in opts: opts[key] = val batch = salt.cli.batch.Batch(opts, eauth=eauth, quiet=True) for ret in batch.run(): yield ret def cmd(self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', jid='', full_return=False, kwarg=None, **kwargs): ''' Synchronously execute a command on targeted minions The cmd method will execute and wait for the timeout period for all minions to reply, then it will return all minion data at once. .. code-block:: python >>> import salt.client >>> local = salt.client.LocalClient() >>> local.cmd('*', 'cmd.run', ['whoami']) {'jerry': 'root'} With extra keyword arguments for the command function to be run: .. code-block:: python local.cmd('*', 'test.arg', ['arg1', 'arg2'], kwarg={'foo': 'bar'}) Compound commands can be used for multiple executions in a single publish. Function names and function arguments are provided in separate lists but the index values must correlate and an empty list must be used if no arguments are required. .. code-block:: python >>> local.cmd('*', [ 'grains.items', 'sys.doc', 'cmd.run', ], [ [], [], ['uptime'], ]) :param tgt: Which minions to target for the execution. Default is shell glob. Modified by the ``tgt_type`` option. :type tgt: string or list :param fun: The module and function to call on the specified minions of the form ``module.function``. For example ``test.ping`` or ``grains.items``. Compound commands Multiple functions may be called in a single publish by passing a list of commands. This can dramatically lower overhead and speed up the application communicating with Salt. This requires that the ``arg`` param is a list of lists. The ``fun`` list and the ``arg`` list must correlate by index meaning a function that does not take arguments must still have a corresponding empty list at the expected index. :type fun: string or list of strings :param arg: A list of arguments to pass to the remote function. 
If the function takes no arguments ``arg`` may be omitted except when executing a compound command. :type arg: list or list-of-lists :param timeout: Seconds to wait after the last minion returns but before all minions return. :param tgt_type: The type of ``tgt``. Allowed values: * ``glob`` - Bash glob completion - Default * ``pcre`` - Perl style regular expression * ``list`` - Python list of hosts * ``grain`` - Match based on a grain comparison * ``grain_pcre`` - Grain comparison with a regex * ``pillar`` - Pillar data comparison * ``pillar_pcre`` - Pillar data comparison with a regex * ``nodegroup`` - Match on nodegroup * ``range`` - Use a Range server for matching * ``compound`` - Pass a compound match string * ``ipcidr`` - Match based on Subnet (CIDR notation) or IPv4 address. .. versionchanged:: 2017.7.0 Renamed from ``expr_form`` to ``tgt_type`` :param ret: The returner to use. The value passed can be single returner, or a comma delimited list of returners to call in order on the minions :param kwarg: A dictionary with keyword arguments for the function. :param full_return: Output the job return only (default) or the full return including exit code and other job metadata. :param kwargs: Optional keyword arguments. Authentication credentials may be passed when using :conf_master:`external_auth`. For example: ``local.cmd('*', 'test.ping', username='saltdev', password='saltdev', eauth='pam')``. Or: ``local.cmd('*', 'test.ping', token='5871821ea51754fdcea8153c1c745433')`` :returns: A dictionary with the result of the execution, keyed by minion ID. A compound command will return a sub-dictionary keyed by function name. 
''' was_listening = self.event.cpub try: pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, timeout, jid, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data ret = {} for fn_ret in self.get_cli_event_returns( pub_data['jid'], pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, **kwargs): if fn_ret: for mid, data in six.iteritems(fn_ret): ret[mid] = (data if full_return else data.get('ret', {})) for failed in list(set(pub_data['minions']) - set(ret)): ret[failed] = False return ret finally: if not was_listening: self.event.close_pub() def cmd_cli( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, progress=False, **kwargs): ''' Used by the :command:`salt` CLI. This method returns minion returns as they come back and attempts to block until all minions return. The function signature is the same as :py:meth:`cmd` with the following exceptions. :param verbose: Print extra information about the running command :returns: A generator ''' was_listening = self.event.cpub if fun.startswith('state.'): ref = {'compound': '-C', 'glob': '', 'grain': '-G', 'grain_pcre': '-P', 'ipcidr': '-S', 'list': '-L', 'nodegroup': '-N', 'pcre': '-E', 'pillar': '-I', 'pillar_pcre': '-J'} if HAS_RANGE: ref['range'] = '-R' if ref[tgt_type].startswith('-'): self.target_data = "{0} '{1}'".format( ref[tgt_type], ','.join(tgt) if isinstance(tgt, list) else tgt) else: self.target_data = ','.join(tgt) if isinstance(tgt, list) else tgt else: self.target_data = '' try: self.pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not self.pub_data: yield self.pub_data else: try: for fn_ret in self.get_cli_event_returns( self.pub_data['jid'], self.pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, verbose, progress, **kwargs): if not fn_ret: continue yield fn_ret except KeyboardInterrupt: exit_msg = ( '\nExiting gracefully on Ctrl-c' '\n' 'This job\'s jid is: {0}\n' 'The 
minions may not have all finished running and any ' 'remaining minions will return upon completion.\n\n' 'To look up the return data for this job later, run the ' 'following command:\n' 'salt-run jobs.lookup_jid {0}'.format(self.pub_data['jid'])) if self.target_data: exit_msg += ( '\n\n' 'To set up the state run to safely exit, run the following command:\n' 'salt {0} state.soft_kill {1}'.format(self.target_data, self.pub_data['jid'])) raise SystemExit(exit_msg) finally: if not was_listening: self.event.close_pub() def cmd_iter( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, **kwargs): ''' Yields the individual minion returns as they come in The function signature is the same as :py:meth:`cmd` with the following exceptions. Normally :py:meth:`cmd_iter` does not yield results for minions that are not connected. If you want it to return results for disconnected minions set `expect_minions=True` in `kwargs`. :return: A generator yielding the individual minion returns .. code-block:: python >>> ret = local.cmd_iter('*', 'test.ping') >>> for i in ret: ... print(i) {'jerry': {'ret': True}} {'dave': {'ret': True}} {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: if kwargs.get('yield_pub_data'): yield pub_data for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=self._get_timeout(timeout), tgt=tgt, tgt_type=tgt_type, **kwargs): if not fn_ret: continue yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_iter_no_block( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, show_jid=False, verbose=False, **kwargs): ''' Yields the individual minion returns as they come in, or None when no returns are available. 
The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A generator yielding the individual minion returns, or None when no returns are available. This allows for actions to be injected in between minion returns. .. code-block:: python >>> ret = local.cmd_iter_no_block('*', 'test.ping') >>> for i in ret: ... print(i) None {'jerry': {'ret': True}} {'dave': {'ret': True}} None {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=timeout, tgt=tgt, tgt_type=tgt_type, block=False, **kwargs): if fn_ret and any([show_jid, verbose]): for minion in fn_ret: fn_ret[minion]['jid'] = pub_data['jid'] yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_full_return( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, **kwargs): ''' Execute a salt command and return ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data return (self.get_cli_static_event_returns(pub_data['jid'], pub_data['minions'], timeout, tgt, tgt_type, verbose)) finally: if not was_listening: self.event.close_pub() def get_cli_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_jid=False, **kwargs): ''' Starts a watcher looking at the return data for a specified JID :returns: all of the information for the JID ''' if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] fret = {} # make sure the minions is a set (since we do set operations on it) 
minions = set(minions) found = set() # start this before the cache lookup-- in case new stuff comes in event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) # get the info from the cache ret = self.get_cache_returns(jid) if ret != {}: found.update(set(ret)) yield ret # if you have all the returns, stop if len(found.intersection(minions)) >= len(minions): raise StopIteration() # otherwise, get them from the event system for event in event_iter: if event != {}: found.update(set(event)) yield event if len(found.intersection(minions)) >= len(minions): self._clean_up_subscriptions(jid) raise StopIteration() # TODO: tests!! def get_returns_no_block( self, tag, match_type=None): ''' Raw function to just return events of jid excluding timeout logic Yield either the raw event data or None Pass a list of additional regular expressions as `tags_regex` to search the event bus for non-return data, such as minion lists returned from syndics. ''' while True: raw = self.event.get_event(wait=0.01, tag=tag, match_type=match_type, full=True, no_block=True, auto_reconnect=self.auto_reconnect) yield raw def get_iter_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', expect_minions=False, block=True, **kwargs): ''' Watch the event system and return job data as it comes in :returns: all of the information for the JID ''' if not isinstance(minions, set): if isinstance(minions, six.string_types): minions = set([minions]) elif isinstance(minions, (list, tuple)): minions = set(list(minions)) if timeout is None: timeout = self.opts['timeout'] gather_job_timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) start = int(time.time()) # timeouts per minion, id_ -> timeout time minion_timeouts = {} found = set() missing = set() # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the 
iteration, since the jid is invalid raise StopIteration() except Exception as exc: log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG) # Wait for the hosts to check in last_time = False # iterator for this job's return if self.opts['order_masters']: # If we are a MoM, we need to gather expected minions from downstreams masters. ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex') else: ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid)) # iterator for the info of this job jinfo_iter = [] # open event jids that need to be un-subscribed from later open_jids = set() timeout_at = time.time() + timeout gather_syndic_wait = time.time() + self.opts['syndic_wait'] # are there still minions running the job out there # start as True so that we ping at least once minions_running = True log.debug( 'get_iter_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) while True: # Process events until timeout is reached or all minions have returned for raw in ret_iter: # if we got None, then there were no events if raw is None: break if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) if 'missing' in raw.get('data', {}): missing.update(raw['data']['missing']) continue if 'return' not in raw['data']: continue if kwargs.get('raw', False): found.add(raw['data']['id']) yield raw else: found.add(raw['data']['id']) ret = {raw['data']['id']: {'ret': raw['data']['return']}} if 'out' in raw['data']: ret[raw['data']['id']]['out'] = raw['data']['out'] if 'retcode' in raw['data']: ret[raw['data']['id']]['retcode'] = raw['data']['retcode'] if 'jid' in raw['data']: ret[raw['data']['id']]['jid'] = raw['data']['jid'] if kwargs.get('_cmd_meta', False): ret[raw['data']['id']].update(raw['data']) log.debug('jid %s return from %s', jid, raw['data']['id']) yield ret # if we have all of the returns (and we aren't a syndic), no need for anything fancy if 
len(found.intersection(minions)) >= len(minions) and not self.opts['order_masters']: # All minions have returned, break out of the loop log.debug('jid %s found all minions %s', jid, found) break elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']: if len(found) >= len(minions) and len(minions) > 0 and time.time() > gather_syndic_wait: # There were some minions to find and we found them # However, this does not imply that *all* masters have yet responded with expected minion lists. # Therefore, continue to wait up to the syndic_wait period (calculated in gather_syndic_wait) to see # if additional lower-level masters deliver their lists of expected # minions. break # If we get here we may not have gathered the minion list yet. Keep waiting # for all lower-level masters to respond with their minion lists # let start the timeouts for all remaining minions for id_ in minions - found: # if we have a new minion in the list, make sure it has a timeout if id_ not in minion_timeouts: minion_timeouts[id_] = time.time() + timeout # if the jinfo has timed out and some minions are still running the job # re-do the ping if time.time() > timeout_at and minions_running: # since this is a new ping, no one has responded yet jinfo = self.gather_job_info(jid, list(minions - found), 'list', **kwargs) minions_running = False # if we weren't assigned any jid that means the master thinks # we have nothing to send if 'jid' not in jinfo: jinfo_iter = [] else: jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid'])) timeout_at = time.time() + gather_job_timeout # if you are a syndic, wait a little longer if self.opts['order_masters']: timeout_at += self.opts.get('syndic_wait', 1) # check for minions that are running the job still for raw in jinfo_iter: # if there are no more events, lets stop waiting for the jinfo if raw is None: break try: if raw['data']['retcode'] > 0: log.error('saltutil returning errors on minion %s', raw['data']['id']) 
minions.remove(raw['data']['id']) break except KeyError as exc: # This is a safe pass. We're just using the try/except to # avoid having to deep-check for keys. missing_key = exc.__str__().strip('\'"') if missing_key == 'retcode': log.debug('retcode missing from client return') else: log.debug( 'Passing on saltutil error. Key \'%s\' missing ' 'from client return. This may be an error in ' 'the client.', missing_key ) # Keep track of the jid events to unsubscribe from later open_jids.add(jinfo['jid']) # TODO: move to a library?? if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue if 'syndic' in raw.get('data', {}): minions.update(raw['syndic']) continue if 'return' not in raw.get('data', {}): continue # if the job isn't running there anymore... don't count if raw['data']['return'] == {}: continue # if the minion throws an exception containing the word "return" # the master will try to handle the string as a dict in the next # step. Check if we have a string, log the issue and continue. 
if isinstance(raw['data']['return'], six.string_types): log.error("unexpected return from minion: %s", raw) continue if 'return' in raw['data']['return'] and \ raw['data']['return']['return'] == {}: continue # if we didn't originally target the minion, lets add it to the list if raw['data']['id'] not in minions: minions.add(raw['data']['id']) # update this minion's timeout, as long as the job is still running minion_timeouts[raw['data']['id']] = time.time() + timeout # a minion returned, so we know its running somewhere minions_running = True # if we have hit gather_job_timeout (after firing the job) AND # if we have hit all minion timeouts, lets call it now = time.time() # if we have finished waiting, and no minions are running the job # then we need to see if each minion has timedout done = (now > timeout_at) and not minions_running if done: # if all minions have timeod out for id_ in minions - found: if now < minion_timeouts[id_]: done = False break if done: break # don't spin if block: time.sleep(0.01) else: yield # If there are any remaining open events, clean them up. if open_jids: for jid in open_jids: self.event.unsubscribe(jid) if expect_minions: for minion in list((minions - found)): yield {minion: {'failed': True}} # Filter out any minions marked as missing for which we received # returns (prevents false events sent due to higher-level masters not # knowing about lower-level minions). 
missing -= found # Report on missing minions if missing: for minion in missing: yield {minion: {'failed': True}} def get_returns( self, jid, minions, timeout=None): ''' Get the returns for the command line interface via the event system ''' minions = set(minions) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout log.debug( 'get_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Master job cache returner [{0}] failed to verify jid. ' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: time_left = timeout_at - int(time.time()) wait = max(1, time_left) raw = self.event.get_event(wait, jid, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: found.add(raw['id']) ret[raw['id']] = raw['return'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break if int(time.time()) > timeout_at: log.info( 'jid %s minions %s did not return in time', jid, (minions - found) ) break time.sleep(0.01) return ret def get_full_returns(self, jid, minions, timeout=None): ''' This method starts off a watcher looking at the return data for a specified jid, it returns all of the information for the jid ''' # TODO: change this from ret to return... or the other way. 
# Its inconsistent, we should pick one ret = {} # create the iterator-- since we want to get anyone in the middle event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Returner {0} could not fetch jid data. ' 'Exception details: {1}'.format( self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # if we have all the minion returns, lets just return if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise lets use the listener we created above to get the rest for event_ret in event_iter: # if nothing in the event_ret, skip if event_ret == {}: time.sleep(0.02) continue for minion, m_data in six.iteritems(event_ret): if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # are we done yet? if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise we hit the timeout, return what we have return ret def get_cache_returns(self, jid): ''' Execute a single pass to gather the contents of the job cache ''' ret = {} try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Could not examine master job cache. ' 'Error occurred in {0} returner. 
' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data return ret def get_cli_static_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_timeout=False, show_jid=False): ''' Get the returns for the command line interface via the event system ''' log.trace('entered - function get_cli_static_event_returns()') minions = set(minions) if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Load could not be retrieved from ' 'returner {0}. 
Exception details: {1}'.format( self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: # Process events until timeout is reached or all minions have returned time_left = timeout_at - int(time.time()) # Wait 0 == forever, use a minimum of 1s wait = max(1, time_left) jid_tag = 'salt/job/{0}'.format(jid) raw = self.event.get_event(wait, jid_tag, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue found.add(raw['id']) ret[raw['id']] = {'ret': raw['return']} ret[raw['id']]['success'] = raw.get('success', False) if 'out' in raw: ret[raw['id']]['out'] = raw['out'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break if int(time.time()) > timeout_at: if verbose or show_timeout: if self.opts.get('minion_data_cache', False) \ or tgt_type in ('glob', 'pcre', 'list'): if len(found) < len(minions): fail = sorted(list(minions.difference(found))) for minion in fail: ret[minion] = { 'out': 'no_return', 'ret': 'Minion did not return' } break time.sleep(0.01) self._clean_up_subscriptions(jid) return ret def get_cli_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, progress=False, show_timeout=False, show_jid=False, **kwargs): ''' Get the returns for the command line interface via the event system ''' log.trace('func get_cli_event_returns()') if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) # lazy load the connected minions connected_minions = None return_count = 0 for ret in self.get_iter_returns(jid, minions, timeout=timeout, tgt=tgt, tgt_type=tgt_type, # (gtmanfred) 
expect_minions is popped here incase it is passed from a client # call. If this is not popped, then it would be passed twice to # get_iter_returns. expect_minions=(kwargs.pop('expect_minions', False) or verbose or show_timeout), **kwargs ): log.debug('return event: %s', ret) return_count = return_count + 1 if progress: for id_, min_ret in six.iteritems(ret): if not min_ret.get('failed') is True: yield {'minion_count': len(minions), 'return_count': return_count} # replace the return structure for missing minions for id_, min_ret in six.iteritems(ret): if min_ret.get('failed') is True: if connected_minions is None: connected_minions = salt.utils.minions.CkMinions(self.opts).connected_ids() if self.opts['minion_data_cache'] \ and salt.cache.factory(self.opts).contains('minions/{0}'.format(id_), 'data') \ and connected_minions \ and id_ not in connected_minions: yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [Not connected]', 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: # don't report syndics as unresponsive minions if not os.path.exists(os.path.join(self.opts['syndic_dir'], id_)): yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [No response]' '\nThe minions may not have all finished running and any ' 'remaining minions will return upon completion. To look ' 'up the return data for this job later, run the following ' 'command:\n\n' 'salt-run jobs.lookup_jid {0}'.format(jid), 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: yield {id_: min_ret} self._clean_up_subscriptions(jid) def get_event_iter_returns(self, jid, minions, timeout=None): ''' Gather the return data from the event system, break hard when timeout is reached. 
''' log.trace('entered - function get_event_iter_returns()') if timeout is None: timeout = self.opts['timeout'] timeout_at = time.time() + timeout found = set() # Check to see if the jid is real, if not return the empty dict if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() # Wait for the hosts to check in while True: raw = self.event.get_event(timeout, auto_reconnect=self.auto_reconnect) if raw is None or time.time() > timeout_at: # Timeout reached break if 'minions' in raw.get('data', {}): continue try: found.add(raw['id']) ret = {raw['id']: {'ret': raw['return']}} except KeyError: # Ignore other erroneous messages continue if 'out' in raw: ret[raw['id']]['out'] = raw['out'] yield ret time.sleep(0.02) def _prep_pub(self, tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs): ''' Set up the payload_kwargs to be sent down to the master ''' if tgt_type == 'nodegroup': if tgt not in self.opts['nodegroups']: conf_file = self.opts.get( 'conf_file', 'the master config file' ) raise SaltInvocationError( 'Node group {0} unavailable in {1}'.format( tgt, conf_file ) ) tgt = salt.utils.minions.nodegroup_comp(tgt, self.opts['nodegroups']) tgt_type = 'compound' # Convert a range expression to a list of nodes and change expression # form to list if tgt_type == 'range' and HAS_RANGE: tgt = self._convert_range_to_list(tgt) tgt_type = 'list' # If an external job cache is specified add it to the ret list if self.opts.get('ext_job_cache'): if ret: ret += ',{0}'.format(self.opts['ext_job_cache']) else: ret = self.opts['ext_job_cache'] # format the payload - make a function that does this in the payload # module # Generate the standard keyword args to feed to format_payload payload_kwargs = {'cmd': 'publish', 'tgt': tgt, 'fun': fun, 'arg': arg, 'key': self.key, 'tgt_type': tgt_type, 'ret': ret, 'jid': jid} # if kwargs are passed, pack them. 
if kwargs: payload_kwargs['kwargs'] = kwargs # If we have a salt user, add it to the payload if self.opts['syndic_master'] and 'user' in kwargs: payload_kwargs['user'] = kwargs['user'] elif self.salt_user: payload_kwargs['user'] = self.salt_user # If we're a syndication master, pass the timeout if self.opts['order_masters']: payload_kwargs['to'] = timeout return payload_kwargs def pub(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, listen=False, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.ReqChannel.factory(self.opts, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. 
# If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError as err: log.error(err) raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: return payload self.key = key payload_kwargs['key'] = self.key payload = channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: return payload # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() return {'jid': payload['load']['jid'], 'minions': payload['load']['minions']} @tornado.gen.coroutine def pub_async(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, io_loop=None, listen=True, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. 
fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.AsyncReqChannel.factory(self.opts, io_loop=io_loop, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. # If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = yield channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError: raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! 
Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: raise tornado.gen.Return(payload) self.key = key payload_kwargs['key'] = self.key payload = yield channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: raise tornado.gen.Return(payload) # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() raise tornado.gen.Return({'jid': payload['load']['jid'], 'minions': payload['load']['minions']}) def __del__(self): # This IS really necessary! # When running tests, if self.events is not destroyed, we leak 2 # threads per test case which uses self.client if hasattr(self, 'event'): # The call below will take care of calling 'self.event.destroy()' del self.event def _clean_up_subscriptions(self, job_id): if self.opts.get('order_masters'): self.event.unsubscribe('syndic/.*/{0}'.format(job_id), 'regex') self.event.unsubscribe('salt/job/{0}'.format(job_id))
saltstack/salt
salt/client/__init__.py
LocalClient.run_job
python
def run_job( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=False, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = self.pub( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' ) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) return self._check_pub_data(pub_data, listen=listen)
Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']}
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L296-L348
[ "def parse_input(args, kwargs=None, condition=True, no_parse=None):\n '''\n Parse out the args and kwargs from a list of input values. Optionally,\n return the args and kwargs without passing them to condition_input().\n\n Don't pull args with key=val apart if it has a newline in it.\n '''\n if no_parse is None:\n no_parse = ()\n if kwargs is None:\n kwargs = {}\n _args = []\n _kwargs = {}\n for arg in args:\n if isinstance(arg, six.string_types):\n arg_name, arg_value = parse_kwarg(arg)\n if arg_name:\n _kwargs[arg_name] = yamlify_arg(arg_value) \\\n if arg_name not in no_parse \\\n else arg_value\n else:\n _args.append(yamlify_arg(arg))\n elif isinstance(arg, dict):\n # Yes, we're popping this key off and adding it back if\n # condition_input is called below, but this is the only way to\n # gracefully handle both CLI and API input.\n if arg.pop('__kwarg__', False) is True:\n _kwargs.update(arg)\n else:\n _args.append(arg)\n else:\n _args.append(arg)\n _kwargs.update(kwargs)\n if condition:\n return condition_input(_args, _kwargs)\n return _args, _kwargs\n", "def _get_timeout(self, timeout):\n '''\n Return the timeout to use\n '''\n if timeout is None:\n return self.opts['timeout']\n if isinstance(timeout, int):\n return timeout\n if isinstance(timeout, six.string_types):\n try:\n return int(timeout)\n except ValueError:\n return self.opts['timeout']\n # Looks like the timeout is invalid, use config\n return self.opts['timeout']\n", "def _check_pub_data(self, pub_data, listen=True):\n '''\n Common checks on the pub_data data structure returned from running pub\n '''\n if pub_data == '':\n # Failed to authenticate, this could be a bunch of things\n raise EauthAuthenticationError(\n 'Failed to authenticate! 
This is most likely because this '\n 'user is not permitted to execute commands, but there is a '\n 'small possibility that a disk error occurred (check '\n 'disk/inode usage).'\n )\n\n # Failed to connect to the master and send the pub\n if 'error' in pub_data:\n print(pub_data['error'])\n log.debug('_check_pub_data() error: %s', pub_data['error'])\n return {}\n elif 'jid' not in pub_data:\n return {}\n if pub_data['jid'] == '0':\n print('Failed to connect to the Master, '\n 'is the Salt Master running?')\n return {}\n\n # If we order masters (via a syndic), don't short circuit if no minions\n # are found\n if not self.opts.get('order_masters'):\n # Check for no minions\n if not pub_data['minions']:\n print('No minions matched the target. '\n 'No command was sent, no jid was assigned.')\n return {}\n\n # don't install event subscription listeners when the request is asynchronous\n # and doesn't care. this is important as it will create event leaks otherwise\n if not listen:\n return pub_data\n\n if self.opts.get('order_masters'):\n self.event.subscribe('syndic/.*/{0}'.format(pub_data['jid']), 'regex')\n\n self.event.subscribe('salt/job/{0}'.format(pub_data['jid']))\n\n return pub_data\n", "def pub(self,\n tgt,\n fun,\n arg=(),\n tgt_type='glob',\n ret='',\n jid='',\n timeout=5,\n listen=False,\n **kwargs):\n '''\n Take the required arguments and publish the given command.\n Arguments:\n tgt:\n The tgt is a regex or a glob used to match up the ids on\n the minions. 
Salt works by always publishing every command\n to all of the minions and then the minions determine if\n the command is for them based on the tgt value.\n fun:\n The function name to be called on the remote host(s), this\n must be a string in the format \"<modulename>.<function name>\"\n arg:\n The arg option needs to be a tuple of arguments to pass\n to the calling function, if left blank\n Returns:\n jid:\n A string, as returned by the publisher, which is the job\n id, this will inform the client where to get the job results\n minions:\n A set, the targets that the tgt passed should match.\n '''\n # Make sure the publisher is running by checking the unix socket\n if (self.opts.get('ipc_mode', '') != 'tcp' and\n not os.path.exists(os.path.join(self.opts['sock_dir'],\n 'publish_pull.ipc'))):\n log.error(\n 'Unable to connect to the salt master publisher at %s',\n self.opts['sock_dir']\n )\n raise SaltClientError\n\n payload_kwargs = self._prep_pub(\n tgt,\n fun,\n arg,\n tgt_type,\n ret,\n jid,\n timeout,\n **kwargs)\n\n master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \\\n ':' + six.text_type(self.opts['ret_port'])\n channel = salt.transport.client.ReqChannel.factory(self.opts,\n crypt='clear',\n master_uri=master_uri)\n\n try:\n # Ensure that the event subscriber is connected.\n # If not, we won't get a response, so error out\n if listen and not self.event.connect_pub(timeout=timeout):\n raise SaltReqTimeoutError()\n payload = channel.send(payload_kwargs, timeout=timeout)\n except SaltReqTimeoutError as err:\n log.error(err)\n raise SaltReqTimeoutError(\n 'Salt request timed out. The master is not responding. You '\n 'may need to run your command with `--async` in order to '\n 'bypass the congested event bus. With `--async`, the CLI tool '\n 'will print the job id (jid) and exit immediately without '\n 'listening for responses. 
You can then use '\n '`salt-run jobs.lookup_jid` to look up the results of the job '\n 'in the job cache later.'\n )\n\n if not payload:\n # The master key could have changed out from under us! Regen\n # and try again if the key has changed\n key = self.__read_master_key()\n if key == self.key:\n return payload\n self.key = key\n payload_kwargs['key'] = self.key\n payload = channel.send(payload_kwargs)\n\n error = payload.pop('error', None)\n if error is not None:\n if isinstance(error, dict):\n err_name = error.get('name', '')\n err_msg = error.get('message', '')\n if err_name == 'AuthenticationError':\n raise AuthenticationError(err_msg)\n elif err_name == 'AuthorizationError':\n raise AuthorizationError(err_msg)\n\n raise PublishError(error)\n\n if not payload:\n return payload\n\n # We have the payload, let's get rid of the channel fast(GC'ed faster)\n channel.close()\n\n return {'jid': payload['load']['jid'],\n 'minions': payload['load']['minions']}\n" ]
class LocalClient(object): ''' The interface used by the :command:`salt` CLI tool on the Salt Master ``LocalClient`` is used to send a command to Salt minions to execute :ref:`execution modules <all-salt.modules>` and return the results to the Salt Master. Importing and using ``LocalClient`` must be done on the same machine as the Salt Master and it must be done using the same user that the Salt Master is running as. (Unless :conf_master:`external_auth` is configured and authentication credentials are included in the execution). .. note:: The LocalClient uses a Tornado IOLoop, this can create issues when using the LocalClient inside an existing IOLoop. If creating the LocalClient in partnership with another IOLoop either create the IOLoop before creating the LocalClient, or when creating the IOLoop use ioloop.current() which will return the ioloop created by LocalClient. .. code-block:: python import salt.client local = salt.client.LocalClient() local.cmd('*', 'test.fib', [10]) ''' def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, 'master'), mopts=None, skip_perm_errors=False, io_loop=None, keep_loop=False, auto_reconnect=False): ''' :param IOLoop io_loop: io_loop used for events. Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. 
''' if mopts: self.opts = mopts else: if os.path.isdir(c_path): log.warning( '%s expects a file path not a directory path(%s) to ' 'its \'c_path\' keyword argument', self.__class__.__name__, c_path ) self.opts = salt.config.client_config(c_path) self.serial = salt.payload.Serial(self.opts) self.salt_user = salt.utils.user.get_specific_user() self.skip_perm_errors = skip_perm_errors self.key = self.__read_master_key() self.auto_reconnect = auto_reconnect self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False, io_loop=io_loop, keep_loop=keep_loop) self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils) self.returners = salt.loader.returners(self.opts, self.functions) def __read_master_key(self): ''' Read in the rotating master authentication key ''' key_user = self.salt_user if key_user == 'root': if self.opts.get('user', 'root') != 'root': key_user = self.opts.get('user', 'root') if key_user.startswith('sudo_'): key_user = self.opts.get('user', 'root') if salt.utils.platform.is_windows(): # The username may contain '\' if it is in Windows # 'DOMAIN\username' format. Fix this for the keyfile path. 
key_user = key_user.replace('\\', '_') keyfile = os.path.join(self.opts['cachedir'], '.{0}_key'.format(key_user)) try: # Make sure all key parent directories are accessible salt.utils.verify.check_path_traversal(self.opts['cachedir'], key_user, self.skip_perm_errors) with salt.utils.files.fopen(keyfile, 'r') as key: return salt.utils.stringutils.to_unicode(key.read()) except (OSError, IOError, SaltClientError): # Fall back to eauth return '' def _convert_range_to_list(self, tgt): ''' convert a seco.range range into a list target ''' range_ = seco.range.Range(self.opts['range_server']) try: return range_.expand(tgt) except seco.range.RangeException as err: print('Range server exception: {0}'.format(err)) return [] def _get_timeout(self, timeout): ''' Return the timeout to use ''' if timeout is None: return self.opts['timeout'] if isinstance(timeout, int): return timeout if isinstance(timeout, six.string_types): try: return int(timeout) except ValueError: return self.opts['timeout'] # Looks like the timeout is invalid, use config return self.opts['timeout'] def gather_job_info(self, jid, tgt, tgt_type, listen=True, **kwargs): ''' Return the information about a given job ''' log.debug('Checking whether jid %s is still running', jid) timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) pub_data = self.run_job(tgt, 'saltutil.find_job', arg=[jid], tgt_type=tgt_type, timeout=timeout, listen=listen, **kwargs ) if 'jid' in pub_data: self.event.subscribe(pub_data['jid']) return pub_data def _check_pub_data(self, pub_data, listen=True): ''' Common checks on the pub_data data structure returned from running pub ''' if pub_data == '': # Failed to authenticate, this could be a bunch of things raise EauthAuthenticationError( 'Failed to authenticate! This is most likely because this ' 'user is not permitted to execute commands, but there is a ' 'small possibility that a disk error occurred (check ' 'disk/inode usage).' 
) # Failed to connect to the master and send the pub if 'error' in pub_data: print(pub_data['error']) log.debug('_check_pub_data() error: %s', pub_data['error']) return {} elif 'jid' not in pub_data: return {} if pub_data['jid'] == '0': print('Failed to connect to the Master, ' 'is the Salt Master running?') return {} # If we order masters (via a syndic), don't short circuit if no minions # are found if not self.opts.get('order_masters'): # Check for no minions if not pub_data['minions']: print('No minions matched the target. ' 'No command was sent, no jid was assigned.') return {} # don't install event subscription listeners when the request is asynchronous # and doesn't care. this is important as it will create event leaks otherwise if not listen: return pub_data if self.opts.get('order_masters'): self.event.subscribe('syndic/.*/{0}'.format(pub_data['jid']), 'regex') self.event.subscribe('salt/job/{0}'.format(pub_data['jid'])) return pub_data def gather_minions(self, tgt, expr_form): _res = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form) return _res['minions'] @tornado.gen.coroutine def run_job_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=True, io_loop=None, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. 
code-block:: python >>> local.run_job_async('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = yield self.pub_async( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), io_loop=io_loop, listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' ) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) raise tornado.gen.Return(self._check_pub_data(pub_data, listen=listen)) def cmd_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', kwarg=None, **kwargs): ''' Asynchronously send a command to connected minions The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A job ID or 0 on failure. .. code-block:: python >>> local.cmd_async('*', 'test.sleep', [300]) '20131219215921857715' ''' pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, jid=jid, kwarg=kwarg, listen=False, **kwargs) try: return pub_data['jid'] except KeyError: return 0 def cmd_subset( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, sub=3, cli=False, progress=False, full_return=False, **kwargs): ''' Execute a command on a random subset of the targeted systems The function signature is the same as :py:meth:`cmd` with the following exceptions. :param sub: The number of systems to execute on :param cli: When this is set to True, a generator is returned, otherwise a dictionary of the minion returns is returned .. 
code-block:: python >>> SLC.cmd_subset('*', 'test.ping', sub=1) {'jerry': True} ''' minion_ret = self.cmd(tgt, 'sys.list_functions', tgt_type=tgt_type, **kwargs) minions = list(minion_ret) random.shuffle(minions) f_tgt = [] for minion in minions: if fun in minion_ret[minion]: f_tgt.append(minion) if len(f_tgt) >= sub: break func = self.cmd if cli: func = self.cmd_cli return func( f_tgt, fun, arg, tgt_type='list', ret=ret, kwarg=kwarg, progress=progress, full_return=full_return, **kwargs) def cmd_batch( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, batch='10%', **kwargs): ''' Iteratively execute a command on subsets of minions at a time The function signature is the same as :py:meth:`cmd` with the following exceptions. :param batch: The batch identifier of systems to execute on :returns: A generator of minion returns .. code-block:: python >>> returns = local.cmd_batch('*', 'state.highstate', batch='10%') >>> for ret in returns: ... print(ret) {'jerry': {...}} {'dave': {...}} {'stewart': {...}} ''' # Late import - not used anywhere else in this file import salt.cli.batch opts = salt.cli.batch.batch_get_opts( tgt, fun, batch, self.opts, arg=arg, tgt_type=tgt_type, ret=ret, kwarg=kwarg, **kwargs) eauth = salt.cli.batch.batch_get_eauth(kwargs) arg = salt.utils.args.parse_input(arg, kwargs=kwarg) opts = {'tgt': tgt, 'fun': fun, 'arg': arg, 'tgt_type': tgt_type, 'ret': ret, 'batch': batch, 'failhard': kwargs.get('failhard', False), 'raw': kwargs.get('raw', False)} if 'timeout' in kwargs: opts['timeout'] = kwargs['timeout'] if 'gather_job_timeout' in kwargs: opts['gather_job_timeout'] = kwargs['gather_job_timeout'] if 'batch_wait' in kwargs: opts['batch_wait'] = int(kwargs['batch_wait']) eauth = {} if 'eauth' in kwargs: eauth['eauth'] = kwargs.pop('eauth') if 'username' in kwargs: eauth['username'] = kwargs.pop('username') if 'password' in kwargs: eauth['password'] = kwargs.pop('password') if 'token' in kwargs: eauth['token'] = kwargs.pop('token') for key, 
val in six.iteritems(self.opts): if key not in opts: opts[key] = val batch = salt.cli.batch.Batch(opts, eauth=eauth, quiet=True) for ret in batch.run(): yield ret def cmd(self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', jid='', full_return=False, kwarg=None, **kwargs): ''' Synchronously execute a command on targeted minions The cmd method will execute and wait for the timeout period for all minions to reply, then it will return all minion data at once. .. code-block:: python >>> import salt.client >>> local = salt.client.LocalClient() >>> local.cmd('*', 'cmd.run', ['whoami']) {'jerry': 'root'} With extra keyword arguments for the command function to be run: .. code-block:: python local.cmd('*', 'test.arg', ['arg1', 'arg2'], kwarg={'foo': 'bar'}) Compound commands can be used for multiple executions in a single publish. Function names and function arguments are provided in separate lists but the index values must correlate and an empty list must be used if no arguments are required. .. code-block:: python >>> local.cmd('*', [ 'grains.items', 'sys.doc', 'cmd.run', ], [ [], [], ['uptime'], ]) :param tgt: Which minions to target for the execution. Default is shell glob. Modified by the ``tgt_type`` option. :type tgt: string or list :param fun: The module and function to call on the specified minions of the form ``module.function``. For example ``test.ping`` or ``grains.items``. Compound commands Multiple functions may be called in a single publish by passing a list of commands. This can dramatically lower overhead and speed up the application communicating with Salt. This requires that the ``arg`` param is a list of lists. The ``fun`` list and the ``arg`` list must correlate by index meaning a function that does not take arguments must still have a corresponding empty list at the expected index. :type fun: string or list of strings :param arg: A list of arguments to pass to the remote function. 
If the function takes no arguments ``arg`` may be omitted except when executing a compound command. :type arg: list or list-of-lists :param timeout: Seconds to wait after the last minion returns but before all minions return. :param tgt_type: The type of ``tgt``. Allowed values: * ``glob`` - Bash glob completion - Default * ``pcre`` - Perl style regular expression * ``list`` - Python list of hosts * ``grain`` - Match based on a grain comparison * ``grain_pcre`` - Grain comparison with a regex * ``pillar`` - Pillar data comparison * ``pillar_pcre`` - Pillar data comparison with a regex * ``nodegroup`` - Match on nodegroup * ``range`` - Use a Range server for matching * ``compound`` - Pass a compound match string * ``ipcidr`` - Match based on Subnet (CIDR notation) or IPv4 address. .. versionchanged:: 2017.7.0 Renamed from ``expr_form`` to ``tgt_type`` :param ret: The returner to use. The value passed can be single returner, or a comma delimited list of returners to call in order on the minions :param kwarg: A dictionary with keyword arguments for the function. :param full_return: Output the job return only (default) or the full return including exit code and other job metadata. :param kwargs: Optional keyword arguments. Authentication credentials may be passed when using :conf_master:`external_auth`. For example: ``local.cmd('*', 'test.ping', username='saltdev', password='saltdev', eauth='pam')``. Or: ``local.cmd('*', 'test.ping', token='5871821ea51754fdcea8153c1c745433')`` :returns: A dictionary with the result of the execution, keyed by minion ID. A compound command will return a sub-dictionary keyed by function name. 
''' was_listening = self.event.cpub try: pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, timeout, jid, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data ret = {} for fn_ret in self.get_cli_event_returns( pub_data['jid'], pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, **kwargs): if fn_ret: for mid, data in six.iteritems(fn_ret): ret[mid] = (data if full_return else data.get('ret', {})) for failed in list(set(pub_data['minions']) - set(ret)): ret[failed] = False return ret finally: if not was_listening: self.event.close_pub() def cmd_cli( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, progress=False, **kwargs): ''' Used by the :command:`salt` CLI. This method returns minion returns as they come back and attempts to block until all minions return. The function signature is the same as :py:meth:`cmd` with the following exceptions. :param verbose: Print extra information about the running command :returns: A generator ''' was_listening = self.event.cpub if fun.startswith('state.'): ref = {'compound': '-C', 'glob': '', 'grain': '-G', 'grain_pcre': '-P', 'ipcidr': '-S', 'list': '-L', 'nodegroup': '-N', 'pcre': '-E', 'pillar': '-I', 'pillar_pcre': '-J'} if HAS_RANGE: ref['range'] = '-R' if ref[tgt_type].startswith('-'): self.target_data = "{0} '{1}'".format( ref[tgt_type], ','.join(tgt) if isinstance(tgt, list) else tgt) else: self.target_data = ','.join(tgt) if isinstance(tgt, list) else tgt else: self.target_data = '' try: self.pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not self.pub_data: yield self.pub_data else: try: for fn_ret in self.get_cli_event_returns( self.pub_data['jid'], self.pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, verbose, progress, **kwargs): if not fn_ret: continue yield fn_ret except KeyboardInterrupt: exit_msg = ( '\nExiting gracefully on Ctrl-c' '\n' 'This job\'s jid is: {0}\n' 'The 
minions may not have all finished running and any ' 'remaining minions will return upon completion.\n\n' 'To look up the return data for this job later, run the ' 'following command:\n' 'salt-run jobs.lookup_jid {0}'.format(self.pub_data['jid'])) if self.target_data: exit_msg += ( '\n\n' 'To set up the state run to safely exit, run the following command:\n' 'salt {0} state.soft_kill {1}'.format(self.target_data, self.pub_data['jid'])) raise SystemExit(exit_msg) finally: if not was_listening: self.event.close_pub() def cmd_iter( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, **kwargs): ''' Yields the individual minion returns as they come in The function signature is the same as :py:meth:`cmd` with the following exceptions. Normally :py:meth:`cmd_iter` does not yield results for minions that are not connected. If you want it to return results for disconnected minions set `expect_minions=True` in `kwargs`. :return: A generator yielding the individual minion returns .. code-block:: python >>> ret = local.cmd_iter('*', 'test.ping') >>> for i in ret: ... print(i) {'jerry': {'ret': True}} {'dave': {'ret': True}} {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: if kwargs.get('yield_pub_data'): yield pub_data for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=self._get_timeout(timeout), tgt=tgt, tgt_type=tgt_type, **kwargs): if not fn_ret: continue yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_iter_no_block( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, show_jid=False, verbose=False, **kwargs): ''' Yields the individual minion returns as they come in, or None when no returns are available. 
The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A generator yielding the individual minion returns, or None when no returns are available. This allows for actions to be injected in between minion returns. .. code-block:: python >>> ret = local.cmd_iter_no_block('*', 'test.ping') >>> for i in ret: ... print(i) None {'jerry': {'ret': True}} {'dave': {'ret': True}} None {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=timeout, tgt=tgt, tgt_type=tgt_type, block=False, **kwargs): if fn_ret and any([show_jid, verbose]): for minion in fn_ret: fn_ret[minion]['jid'] = pub_data['jid'] yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_full_return( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, **kwargs): ''' Execute a salt command and return ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data return (self.get_cli_static_event_returns(pub_data['jid'], pub_data['minions'], timeout, tgt, tgt_type, verbose)) finally: if not was_listening: self.event.close_pub() def get_cli_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_jid=False, **kwargs): ''' Starts a watcher looking at the return data for a specified JID :returns: all of the information for the JID ''' if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] fret = {} # make sure the minions is a set (since we do set operations on it) 
minions = set(minions) found = set() # start this before the cache lookup-- in case new stuff comes in event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) # get the info from the cache ret = self.get_cache_returns(jid) if ret != {}: found.update(set(ret)) yield ret # if you have all the returns, stop if len(found.intersection(minions)) >= len(minions): raise StopIteration() # otherwise, get them from the event system for event in event_iter: if event != {}: found.update(set(event)) yield event if len(found.intersection(minions)) >= len(minions): self._clean_up_subscriptions(jid) raise StopIteration() # TODO: tests!! def get_returns_no_block( self, tag, match_type=None): ''' Raw function to just return events of jid excluding timeout logic Yield either the raw event data or None Pass a list of additional regular expressions as `tags_regex` to search the event bus for non-return data, such as minion lists returned from syndics. ''' while True: raw = self.event.get_event(wait=0.01, tag=tag, match_type=match_type, full=True, no_block=True, auto_reconnect=self.auto_reconnect) yield raw def get_iter_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', expect_minions=False, block=True, **kwargs): ''' Watch the event system and return job data as it comes in :returns: all of the information for the JID ''' if not isinstance(minions, set): if isinstance(minions, six.string_types): minions = set([minions]) elif isinstance(minions, (list, tuple)): minions = set(list(minions)) if timeout is None: timeout = self.opts['timeout'] gather_job_timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) start = int(time.time()) # timeouts per minion, id_ -> timeout time minion_timeouts = {} found = set() missing = set() # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the 
iteration, since the jid is invalid raise StopIteration() except Exception as exc: log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG) # Wait for the hosts to check in last_time = False # iterator for this job's return if self.opts['order_masters']: # If we are a MoM, we need to gather expected minions from downstreams masters. ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex') else: ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid)) # iterator for the info of this job jinfo_iter = [] # open event jids that need to be un-subscribed from later open_jids = set() timeout_at = time.time() + timeout gather_syndic_wait = time.time() + self.opts['syndic_wait'] # are there still minions running the job out there # start as True so that we ping at least once minions_running = True log.debug( 'get_iter_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) while True: # Process events until timeout is reached or all minions have returned for raw in ret_iter: # if we got None, then there were no events if raw is None: break if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) if 'missing' in raw.get('data', {}): missing.update(raw['data']['missing']) continue if 'return' not in raw['data']: continue if kwargs.get('raw', False): found.add(raw['data']['id']) yield raw else: found.add(raw['data']['id']) ret = {raw['data']['id']: {'ret': raw['data']['return']}} if 'out' in raw['data']: ret[raw['data']['id']]['out'] = raw['data']['out'] if 'retcode' in raw['data']: ret[raw['data']['id']]['retcode'] = raw['data']['retcode'] if 'jid' in raw['data']: ret[raw['data']['id']]['jid'] = raw['data']['jid'] if kwargs.get('_cmd_meta', False): ret[raw['data']['id']].update(raw['data']) log.debug('jid %s return from %s', jid, raw['data']['id']) yield ret # if we have all of the returns (and we aren't a syndic), no need for anything fancy if 
len(found.intersection(minions)) >= len(minions) and not self.opts['order_masters']: # All minions have returned, break out of the loop log.debug('jid %s found all minions %s', jid, found) break elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']: if len(found) >= len(minions) and len(minions) > 0 and time.time() > gather_syndic_wait: # There were some minions to find and we found them # However, this does not imply that *all* masters have yet responded with expected minion lists. # Therefore, continue to wait up to the syndic_wait period (calculated in gather_syndic_wait) to see # if additional lower-level masters deliver their lists of expected # minions. break # If we get here we may not have gathered the minion list yet. Keep waiting # for all lower-level masters to respond with their minion lists # let start the timeouts for all remaining minions for id_ in minions - found: # if we have a new minion in the list, make sure it has a timeout if id_ not in minion_timeouts: minion_timeouts[id_] = time.time() + timeout # if the jinfo has timed out and some minions are still running the job # re-do the ping if time.time() > timeout_at and minions_running: # since this is a new ping, no one has responded yet jinfo = self.gather_job_info(jid, list(minions - found), 'list', **kwargs) minions_running = False # if we weren't assigned any jid that means the master thinks # we have nothing to send if 'jid' not in jinfo: jinfo_iter = [] else: jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid'])) timeout_at = time.time() + gather_job_timeout # if you are a syndic, wait a little longer if self.opts['order_masters']: timeout_at += self.opts.get('syndic_wait', 1) # check for minions that are running the job still for raw in jinfo_iter: # if there are no more events, lets stop waiting for the jinfo if raw is None: break try: if raw['data']['retcode'] > 0: log.error('saltutil returning errors on minion %s', raw['data']['id']) 
minions.remove(raw['data']['id']) break except KeyError as exc: # This is a safe pass. We're just using the try/except to # avoid having to deep-check for keys. missing_key = exc.__str__().strip('\'"') if missing_key == 'retcode': log.debug('retcode missing from client return') else: log.debug( 'Passing on saltutil error. Key \'%s\' missing ' 'from client return. This may be an error in ' 'the client.', missing_key ) # Keep track of the jid events to unsubscribe from later open_jids.add(jinfo['jid']) # TODO: move to a library?? if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue if 'syndic' in raw.get('data', {}): minions.update(raw['syndic']) continue if 'return' not in raw.get('data', {}): continue # if the job isn't running there anymore... don't count if raw['data']['return'] == {}: continue # if the minion throws an exception containing the word "return" # the master will try to handle the string as a dict in the next # step. Check if we have a string, log the issue and continue. 
if isinstance(raw['data']['return'], six.string_types): log.error("unexpected return from minion: %s", raw) continue if 'return' in raw['data']['return'] and \ raw['data']['return']['return'] == {}: continue # if we didn't originally target the minion, lets add it to the list if raw['data']['id'] not in minions: minions.add(raw['data']['id']) # update this minion's timeout, as long as the job is still running minion_timeouts[raw['data']['id']] = time.time() + timeout # a minion returned, so we know its running somewhere minions_running = True # if we have hit gather_job_timeout (after firing the job) AND # if we have hit all minion timeouts, lets call it now = time.time() # if we have finished waiting, and no minions are running the job # then we need to see if each minion has timedout done = (now > timeout_at) and not minions_running if done: # if all minions have timeod out for id_ in minions - found: if now < minion_timeouts[id_]: done = False break if done: break # don't spin if block: time.sleep(0.01) else: yield # If there are any remaining open events, clean them up. if open_jids: for jid in open_jids: self.event.unsubscribe(jid) if expect_minions: for minion in list((minions - found)): yield {minion: {'failed': True}} # Filter out any minions marked as missing for which we received # returns (prevents false events sent due to higher-level masters not # knowing about lower-level minions). 
missing -= found # Report on missing minions if missing: for minion in missing: yield {minion: {'failed': True}} def get_returns( self, jid, minions, timeout=None): ''' Get the returns for the command line interface via the event system ''' minions = set(minions) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout log.debug( 'get_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Master job cache returner [{0}] failed to verify jid. ' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: time_left = timeout_at - int(time.time()) wait = max(1, time_left) raw = self.event.get_event(wait, jid, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: found.add(raw['id']) ret[raw['id']] = raw['return'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break if int(time.time()) > timeout_at: log.info( 'jid %s minions %s did not return in time', jid, (minions - found) ) break time.sleep(0.01) return ret def get_full_returns(self, jid, minions, timeout=None): ''' This method starts off a watcher looking at the return data for a specified jid, it returns all of the information for the jid ''' # TODO: change this from ret to return... or the other way. 
# Its inconsistent, we should pick one ret = {} # create the iterator-- since we want to get anyone in the middle event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Returner {0} could not fetch jid data. ' 'Exception details: {1}'.format( self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # if we have all the minion returns, lets just return if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise lets use the listener we created above to get the rest for event_ret in event_iter: # if nothing in the event_ret, skip if event_ret == {}: time.sleep(0.02) continue for minion, m_data in six.iteritems(event_ret): if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # are we done yet? if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise we hit the timeout, return what we have return ret def get_cache_returns(self, jid): ''' Execute a single pass to gather the contents of the job cache ''' ret = {} try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Could not examine master job cache. ' 'Error occurred in {0} returner. 
' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data return ret def get_cli_static_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_timeout=False, show_jid=False): ''' Get the returns for the command line interface via the event system ''' log.trace('entered - function get_cli_static_event_returns()') minions = set(minions) if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Load could not be retrieved from ' 'returner {0}. 
Exception details: {1}'.format( self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: # Process events until timeout is reached or all minions have returned time_left = timeout_at - int(time.time()) # Wait 0 == forever, use a minimum of 1s wait = max(1, time_left) jid_tag = 'salt/job/{0}'.format(jid) raw = self.event.get_event(wait, jid_tag, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue found.add(raw['id']) ret[raw['id']] = {'ret': raw['return']} ret[raw['id']]['success'] = raw.get('success', False) if 'out' in raw: ret[raw['id']]['out'] = raw['out'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break if int(time.time()) > timeout_at: if verbose or show_timeout: if self.opts.get('minion_data_cache', False) \ or tgt_type in ('glob', 'pcre', 'list'): if len(found) < len(minions): fail = sorted(list(minions.difference(found))) for minion in fail: ret[minion] = { 'out': 'no_return', 'ret': 'Minion did not return' } break time.sleep(0.01) self._clean_up_subscriptions(jid) return ret def get_cli_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, progress=False, show_timeout=False, show_jid=False, **kwargs): ''' Get the returns for the command line interface via the event system ''' log.trace('func get_cli_event_returns()') if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) # lazy load the connected minions connected_minions = None return_count = 0 for ret in self.get_iter_returns(jid, minions, timeout=timeout, tgt=tgt, tgt_type=tgt_type, # (gtmanfred) 
expect_minions is popped here incase it is passed from a client # call. If this is not popped, then it would be passed twice to # get_iter_returns. expect_minions=(kwargs.pop('expect_minions', False) or verbose or show_timeout), **kwargs ): log.debug('return event: %s', ret) return_count = return_count + 1 if progress: for id_, min_ret in six.iteritems(ret): if not min_ret.get('failed') is True: yield {'minion_count': len(minions), 'return_count': return_count} # replace the return structure for missing minions for id_, min_ret in six.iteritems(ret): if min_ret.get('failed') is True: if connected_minions is None: connected_minions = salt.utils.minions.CkMinions(self.opts).connected_ids() if self.opts['minion_data_cache'] \ and salt.cache.factory(self.opts).contains('minions/{0}'.format(id_), 'data') \ and connected_minions \ and id_ not in connected_minions: yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [Not connected]', 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: # don't report syndics as unresponsive minions if not os.path.exists(os.path.join(self.opts['syndic_dir'], id_)): yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [No response]' '\nThe minions may not have all finished running and any ' 'remaining minions will return upon completion. To look ' 'up the return data for this job later, run the following ' 'command:\n\n' 'salt-run jobs.lookup_jid {0}'.format(jid), 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: yield {id_: min_ret} self._clean_up_subscriptions(jid) def get_event_iter_returns(self, jid, minions, timeout=None): ''' Gather the return data from the event system, break hard when timeout is reached. 
''' log.trace('entered - function get_event_iter_returns()') if timeout is None: timeout = self.opts['timeout'] timeout_at = time.time() + timeout found = set() # Check to see if the jid is real, if not return the empty dict if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() # Wait for the hosts to check in while True: raw = self.event.get_event(timeout, auto_reconnect=self.auto_reconnect) if raw is None or time.time() > timeout_at: # Timeout reached break if 'minions' in raw.get('data', {}): continue try: found.add(raw['id']) ret = {raw['id']: {'ret': raw['return']}} except KeyError: # Ignore other erroneous messages continue if 'out' in raw: ret[raw['id']]['out'] = raw['out'] yield ret time.sleep(0.02) def _prep_pub(self, tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs): ''' Set up the payload_kwargs to be sent down to the master ''' if tgt_type == 'nodegroup': if tgt not in self.opts['nodegroups']: conf_file = self.opts.get( 'conf_file', 'the master config file' ) raise SaltInvocationError( 'Node group {0} unavailable in {1}'.format( tgt, conf_file ) ) tgt = salt.utils.minions.nodegroup_comp(tgt, self.opts['nodegroups']) tgt_type = 'compound' # Convert a range expression to a list of nodes and change expression # form to list if tgt_type == 'range' and HAS_RANGE: tgt = self._convert_range_to_list(tgt) tgt_type = 'list' # If an external job cache is specified add it to the ret list if self.opts.get('ext_job_cache'): if ret: ret += ',{0}'.format(self.opts['ext_job_cache']) else: ret = self.opts['ext_job_cache'] # format the payload - make a function that does this in the payload # module # Generate the standard keyword args to feed to format_payload payload_kwargs = {'cmd': 'publish', 'tgt': tgt, 'fun': fun, 'arg': arg, 'key': self.key, 'tgt_type': tgt_type, 'ret': ret, 'jid': jid} # if kwargs are passed, pack them. 
if kwargs: payload_kwargs['kwargs'] = kwargs # If we have a salt user, add it to the payload if self.opts['syndic_master'] and 'user' in kwargs: payload_kwargs['user'] = kwargs['user'] elif self.salt_user: payload_kwargs['user'] = self.salt_user # If we're a syndication master, pass the timeout if self.opts['order_masters']: payload_kwargs['to'] = timeout return payload_kwargs def pub(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, listen=False, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.ReqChannel.factory(self.opts, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. 
# If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError as err: log.error(err) raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: return payload self.key = key payload_kwargs['key'] = self.key payload = channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: return payload # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() return {'jid': payload['load']['jid'], 'minions': payload['load']['minions']} @tornado.gen.coroutine def pub_async(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, io_loop=None, listen=True, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. 
fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.AsyncReqChannel.factory(self.opts, io_loop=io_loop, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. # If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = yield channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError: raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! 
Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: raise tornado.gen.Return(payload) self.key = key payload_kwargs['key'] = self.key payload = yield channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: raise tornado.gen.Return(payload) # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() raise tornado.gen.Return({'jid': payload['load']['jid'], 'minions': payload['load']['minions']}) def __del__(self): # This IS really necessary! # When running tests, if self.events is not destroyed, we leak 2 # threads per test case which uses self.client if hasattr(self, 'event'): # The call below will take care of calling 'self.event.destroy()' del self.event def _clean_up_subscriptions(self, job_id): if self.opts.get('order_masters'): self.event.unsubscribe('syndic/.*/{0}'.format(job_id), 'regex') self.event.unsubscribe('salt/job/{0}'.format(job_id))
saltstack/salt
salt/client/__init__.py
LocalClient.run_job_async
python
def run_job_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=True, io_loop=None, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job_async('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = yield self.pub_async( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), io_loop=io_loop, listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' ) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) raise tornado.gen.Return(self._check_pub_data(pub_data, listen=listen))
Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job_async('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']}
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L355-L409
[ "def parse_input(args, kwargs=None, condition=True, no_parse=None):\n '''\n Parse out the args and kwargs from a list of input values. Optionally,\n return the args and kwargs without passing them to condition_input().\n\n Don't pull args with key=val apart if it has a newline in it.\n '''\n if no_parse is None:\n no_parse = ()\n if kwargs is None:\n kwargs = {}\n _args = []\n _kwargs = {}\n for arg in args:\n if isinstance(arg, six.string_types):\n arg_name, arg_value = parse_kwarg(arg)\n if arg_name:\n _kwargs[arg_name] = yamlify_arg(arg_value) \\\n if arg_name not in no_parse \\\n else arg_value\n else:\n _args.append(yamlify_arg(arg))\n elif isinstance(arg, dict):\n # Yes, we're popping this key off and adding it back if\n # condition_input is called below, but this is the only way to\n # gracefully handle both CLI and API input.\n if arg.pop('__kwarg__', False) is True:\n _kwargs.update(arg)\n else:\n _args.append(arg)\n else:\n _args.append(arg)\n _kwargs.update(kwargs)\n if condition:\n return condition_input(_args, _kwargs)\n return _args, _kwargs\n", "def _get_timeout(self, timeout):\n '''\n Return the timeout to use\n '''\n if timeout is None:\n return self.opts['timeout']\n if isinstance(timeout, int):\n return timeout\n if isinstance(timeout, six.string_types):\n try:\n return int(timeout)\n except ValueError:\n return self.opts['timeout']\n # Looks like the timeout is invalid, use config\n return self.opts['timeout']\n", "def _check_pub_data(self, pub_data, listen=True):\n '''\n Common checks on the pub_data data structure returned from running pub\n '''\n if pub_data == '':\n # Failed to authenticate, this could be a bunch of things\n raise EauthAuthenticationError(\n 'Failed to authenticate! 
This is most likely because this '\n 'user is not permitted to execute commands, but there is a '\n 'small possibility that a disk error occurred (check '\n 'disk/inode usage).'\n )\n\n # Failed to connect to the master and send the pub\n if 'error' in pub_data:\n print(pub_data['error'])\n log.debug('_check_pub_data() error: %s', pub_data['error'])\n return {}\n elif 'jid' not in pub_data:\n return {}\n if pub_data['jid'] == '0':\n print('Failed to connect to the Master, '\n 'is the Salt Master running?')\n return {}\n\n # If we order masters (via a syndic), don't short circuit if no minions\n # are found\n if not self.opts.get('order_masters'):\n # Check for no minions\n if not pub_data['minions']:\n print('No minions matched the target. '\n 'No command was sent, no jid was assigned.')\n return {}\n\n # don't install event subscription listeners when the request is asynchronous\n # and doesn't care. this is important as it will create event leaks otherwise\n if not listen:\n return pub_data\n\n if self.opts.get('order_masters'):\n self.event.subscribe('syndic/.*/{0}'.format(pub_data['jid']), 'regex')\n\n self.event.subscribe('salt/job/{0}'.format(pub_data['jid']))\n\n return pub_data\n" ]
class LocalClient(object): ''' The interface used by the :command:`salt` CLI tool on the Salt Master ``LocalClient`` is used to send a command to Salt minions to execute :ref:`execution modules <all-salt.modules>` and return the results to the Salt Master. Importing and using ``LocalClient`` must be done on the same machine as the Salt Master and it must be done using the same user that the Salt Master is running as. (Unless :conf_master:`external_auth` is configured and authentication credentials are included in the execution). .. note:: The LocalClient uses a Tornado IOLoop, this can create issues when using the LocalClient inside an existing IOLoop. If creating the LocalClient in partnership with another IOLoop either create the IOLoop before creating the LocalClient, or when creating the IOLoop use ioloop.current() which will return the ioloop created by LocalClient. .. code-block:: python import salt.client local = salt.client.LocalClient() local.cmd('*', 'test.fib', [10]) ''' def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, 'master'), mopts=None, skip_perm_errors=False, io_loop=None, keep_loop=False, auto_reconnect=False): ''' :param IOLoop io_loop: io_loop used for events. Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. 
''' if mopts: self.opts = mopts else: if os.path.isdir(c_path): log.warning( '%s expects a file path not a directory path(%s) to ' 'its \'c_path\' keyword argument', self.__class__.__name__, c_path ) self.opts = salt.config.client_config(c_path) self.serial = salt.payload.Serial(self.opts) self.salt_user = salt.utils.user.get_specific_user() self.skip_perm_errors = skip_perm_errors self.key = self.__read_master_key() self.auto_reconnect = auto_reconnect self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False, io_loop=io_loop, keep_loop=keep_loop) self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils) self.returners = salt.loader.returners(self.opts, self.functions) def __read_master_key(self): ''' Read in the rotating master authentication key ''' key_user = self.salt_user if key_user == 'root': if self.opts.get('user', 'root') != 'root': key_user = self.opts.get('user', 'root') if key_user.startswith('sudo_'): key_user = self.opts.get('user', 'root') if salt.utils.platform.is_windows(): # The username may contain '\' if it is in Windows # 'DOMAIN\username' format. Fix this for the keyfile path. 
key_user = key_user.replace('\\', '_') keyfile = os.path.join(self.opts['cachedir'], '.{0}_key'.format(key_user)) try: # Make sure all key parent directories are accessible salt.utils.verify.check_path_traversal(self.opts['cachedir'], key_user, self.skip_perm_errors) with salt.utils.files.fopen(keyfile, 'r') as key: return salt.utils.stringutils.to_unicode(key.read()) except (OSError, IOError, SaltClientError): # Fall back to eauth return '' def _convert_range_to_list(self, tgt): ''' convert a seco.range range into a list target ''' range_ = seco.range.Range(self.opts['range_server']) try: return range_.expand(tgt) except seco.range.RangeException as err: print('Range server exception: {0}'.format(err)) return [] def _get_timeout(self, timeout): ''' Return the timeout to use ''' if timeout is None: return self.opts['timeout'] if isinstance(timeout, int): return timeout if isinstance(timeout, six.string_types): try: return int(timeout) except ValueError: return self.opts['timeout'] # Looks like the timeout is invalid, use config return self.opts['timeout'] def gather_job_info(self, jid, tgt, tgt_type, listen=True, **kwargs): ''' Return the information about a given job ''' log.debug('Checking whether jid %s is still running', jid) timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) pub_data = self.run_job(tgt, 'saltutil.find_job', arg=[jid], tgt_type=tgt_type, timeout=timeout, listen=listen, **kwargs ) if 'jid' in pub_data: self.event.subscribe(pub_data['jid']) return pub_data def _check_pub_data(self, pub_data, listen=True): ''' Common checks on the pub_data data structure returned from running pub ''' if pub_data == '': # Failed to authenticate, this could be a bunch of things raise EauthAuthenticationError( 'Failed to authenticate! This is most likely because this ' 'user is not permitted to execute commands, but there is a ' 'small possibility that a disk error occurred (check ' 'disk/inode usage).' 
) # Failed to connect to the master and send the pub if 'error' in pub_data: print(pub_data['error']) log.debug('_check_pub_data() error: %s', pub_data['error']) return {} elif 'jid' not in pub_data: return {} if pub_data['jid'] == '0': print('Failed to connect to the Master, ' 'is the Salt Master running?') return {} # If we order masters (via a syndic), don't short circuit if no minions # are found if not self.opts.get('order_masters'): # Check for no minions if not pub_data['minions']: print('No minions matched the target. ' 'No command was sent, no jid was assigned.') return {} # don't install event subscription listeners when the request is asynchronous # and doesn't care. this is important as it will create event leaks otherwise if not listen: return pub_data if self.opts.get('order_masters'): self.event.subscribe('syndic/.*/{0}'.format(pub_data['jid']), 'regex') self.event.subscribe('salt/job/{0}'.format(pub_data['jid'])) return pub_data def run_job( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=False, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = self.pub( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) return self._check_pub_data(pub_data, listen=listen) def gather_minions(self, tgt, expr_form): _res = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form) return _res['minions'] @tornado.gen.coroutine def cmd_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', kwarg=None, **kwargs): ''' Asynchronously send a command to connected minions The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A job ID or 0 on failure. .. code-block:: python >>> local.cmd_async('*', 'test.sleep', [300]) '20131219215921857715' ''' pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, jid=jid, kwarg=kwarg, listen=False, **kwargs) try: return pub_data['jid'] except KeyError: return 0 def cmd_subset( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, sub=3, cli=False, progress=False, full_return=False, **kwargs): ''' Execute a command on a random subset of the targeted systems The function signature is the same as :py:meth:`cmd` with the following exceptions. :param sub: The number of systems to execute on :param cli: When this is set to True, a generator is returned, otherwise a dictionary of the minion returns is returned .. 
code-block:: python >>> SLC.cmd_subset('*', 'test.ping', sub=1) {'jerry': True} ''' minion_ret = self.cmd(tgt, 'sys.list_functions', tgt_type=tgt_type, **kwargs) minions = list(minion_ret) random.shuffle(minions) f_tgt = [] for minion in minions: if fun in minion_ret[minion]: f_tgt.append(minion) if len(f_tgt) >= sub: break func = self.cmd if cli: func = self.cmd_cli return func( f_tgt, fun, arg, tgt_type='list', ret=ret, kwarg=kwarg, progress=progress, full_return=full_return, **kwargs) def cmd_batch( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, batch='10%', **kwargs): ''' Iteratively execute a command on subsets of minions at a time The function signature is the same as :py:meth:`cmd` with the following exceptions. :param batch: The batch identifier of systems to execute on :returns: A generator of minion returns .. code-block:: python >>> returns = local.cmd_batch('*', 'state.highstate', batch='10%') >>> for ret in returns: ... print(ret) {'jerry': {...}} {'dave': {...}} {'stewart': {...}} ''' # Late import - not used anywhere else in this file import salt.cli.batch opts = salt.cli.batch.batch_get_opts( tgt, fun, batch, self.opts, arg=arg, tgt_type=tgt_type, ret=ret, kwarg=kwarg, **kwargs) eauth = salt.cli.batch.batch_get_eauth(kwargs) arg = salt.utils.args.parse_input(arg, kwargs=kwarg) opts = {'tgt': tgt, 'fun': fun, 'arg': arg, 'tgt_type': tgt_type, 'ret': ret, 'batch': batch, 'failhard': kwargs.get('failhard', False), 'raw': kwargs.get('raw', False)} if 'timeout' in kwargs: opts['timeout'] = kwargs['timeout'] if 'gather_job_timeout' in kwargs: opts['gather_job_timeout'] = kwargs['gather_job_timeout'] if 'batch_wait' in kwargs: opts['batch_wait'] = int(kwargs['batch_wait']) eauth = {} if 'eauth' in kwargs: eauth['eauth'] = kwargs.pop('eauth') if 'username' in kwargs: eauth['username'] = kwargs.pop('username') if 'password' in kwargs: eauth['password'] = kwargs.pop('password') if 'token' in kwargs: eauth['token'] = kwargs.pop('token') for key, 
val in six.iteritems(self.opts): if key not in opts: opts[key] = val batch = salt.cli.batch.Batch(opts, eauth=eauth, quiet=True) for ret in batch.run(): yield ret def cmd(self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', jid='', full_return=False, kwarg=None, **kwargs): ''' Synchronously execute a command on targeted minions The cmd method will execute and wait for the timeout period for all minions to reply, then it will return all minion data at once. .. code-block:: python >>> import salt.client >>> local = salt.client.LocalClient() >>> local.cmd('*', 'cmd.run', ['whoami']) {'jerry': 'root'} With extra keyword arguments for the command function to be run: .. code-block:: python local.cmd('*', 'test.arg', ['arg1', 'arg2'], kwarg={'foo': 'bar'}) Compound commands can be used for multiple executions in a single publish. Function names and function arguments are provided in separate lists but the index values must correlate and an empty list must be used if no arguments are required. .. code-block:: python >>> local.cmd('*', [ 'grains.items', 'sys.doc', 'cmd.run', ], [ [], [], ['uptime'], ]) :param tgt: Which minions to target for the execution. Default is shell glob. Modified by the ``tgt_type`` option. :type tgt: string or list :param fun: The module and function to call on the specified minions of the form ``module.function``. For example ``test.ping`` or ``grains.items``. Compound commands Multiple functions may be called in a single publish by passing a list of commands. This can dramatically lower overhead and speed up the application communicating with Salt. This requires that the ``arg`` param is a list of lists. The ``fun`` list and the ``arg`` list must correlate by index meaning a function that does not take arguments must still have a corresponding empty list at the expected index. :type fun: string or list of strings :param arg: A list of arguments to pass to the remote function. 
If the function takes no arguments ``arg`` may be omitted except when executing a compound command. :type arg: list or list-of-lists :param timeout: Seconds to wait after the last minion returns but before all minions return. :param tgt_type: The type of ``tgt``. Allowed values: * ``glob`` - Bash glob completion - Default * ``pcre`` - Perl style regular expression * ``list`` - Python list of hosts * ``grain`` - Match based on a grain comparison * ``grain_pcre`` - Grain comparison with a regex * ``pillar`` - Pillar data comparison * ``pillar_pcre`` - Pillar data comparison with a regex * ``nodegroup`` - Match on nodegroup * ``range`` - Use a Range server for matching * ``compound`` - Pass a compound match string * ``ipcidr`` - Match based on Subnet (CIDR notation) or IPv4 address. .. versionchanged:: 2017.7.0 Renamed from ``expr_form`` to ``tgt_type`` :param ret: The returner to use. The value passed can be single returner, or a comma delimited list of returners to call in order on the minions :param kwarg: A dictionary with keyword arguments for the function. :param full_return: Output the job return only (default) or the full return including exit code and other job metadata. :param kwargs: Optional keyword arguments. Authentication credentials may be passed when using :conf_master:`external_auth`. For example: ``local.cmd('*', 'test.ping', username='saltdev', password='saltdev', eauth='pam')``. Or: ``local.cmd('*', 'test.ping', token='5871821ea51754fdcea8153c1c745433')`` :returns: A dictionary with the result of the execution, keyed by minion ID. A compound command will return a sub-dictionary keyed by function name. 
''' was_listening = self.event.cpub try: pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, timeout, jid, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data ret = {} for fn_ret in self.get_cli_event_returns( pub_data['jid'], pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, **kwargs): if fn_ret: for mid, data in six.iteritems(fn_ret): ret[mid] = (data if full_return else data.get('ret', {})) for failed in list(set(pub_data['minions']) - set(ret)): ret[failed] = False return ret finally: if not was_listening: self.event.close_pub() def cmd_cli( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, progress=False, **kwargs): ''' Used by the :command:`salt` CLI. This method returns minion returns as they come back and attempts to block until all minions return. The function signature is the same as :py:meth:`cmd` with the following exceptions. :param verbose: Print extra information about the running command :returns: A generator ''' was_listening = self.event.cpub if fun.startswith('state.'): ref = {'compound': '-C', 'glob': '', 'grain': '-G', 'grain_pcre': '-P', 'ipcidr': '-S', 'list': '-L', 'nodegroup': '-N', 'pcre': '-E', 'pillar': '-I', 'pillar_pcre': '-J'} if HAS_RANGE: ref['range'] = '-R' if ref[tgt_type].startswith('-'): self.target_data = "{0} '{1}'".format( ref[tgt_type], ','.join(tgt) if isinstance(tgt, list) else tgt) else: self.target_data = ','.join(tgt) if isinstance(tgt, list) else tgt else: self.target_data = '' try: self.pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not self.pub_data: yield self.pub_data else: try: for fn_ret in self.get_cli_event_returns( self.pub_data['jid'], self.pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, verbose, progress, **kwargs): if not fn_ret: continue yield fn_ret except KeyboardInterrupt: exit_msg = ( '\nExiting gracefully on Ctrl-c' '\n' 'This job\'s jid is: {0}\n' 'The 
minions may not have all finished running and any ' 'remaining minions will return upon completion.\n\n' 'To look up the return data for this job later, run the ' 'following command:\n' 'salt-run jobs.lookup_jid {0}'.format(self.pub_data['jid'])) if self.target_data: exit_msg += ( '\n\n' 'To set up the state run to safely exit, run the following command:\n' 'salt {0} state.soft_kill {1}'.format(self.target_data, self.pub_data['jid'])) raise SystemExit(exit_msg) finally: if not was_listening: self.event.close_pub() def cmd_iter( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, **kwargs): ''' Yields the individual minion returns as they come in The function signature is the same as :py:meth:`cmd` with the following exceptions. Normally :py:meth:`cmd_iter` does not yield results for minions that are not connected. If you want it to return results for disconnected minions set `expect_minions=True` in `kwargs`. :return: A generator yielding the individual minion returns .. code-block:: python >>> ret = local.cmd_iter('*', 'test.ping') >>> for i in ret: ... print(i) {'jerry': {'ret': True}} {'dave': {'ret': True}} {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: if kwargs.get('yield_pub_data'): yield pub_data for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=self._get_timeout(timeout), tgt=tgt, tgt_type=tgt_type, **kwargs): if not fn_ret: continue yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_iter_no_block( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, show_jid=False, verbose=False, **kwargs): ''' Yields the individual minion returns as they come in, or None when no returns are available. 
The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A generator yielding the individual minion returns, or None when no returns are available. This allows for actions to be injected in between minion returns. .. code-block:: python >>> ret = local.cmd_iter_no_block('*', 'test.ping') >>> for i in ret: ... print(i) None {'jerry': {'ret': True}} {'dave': {'ret': True}} None {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=timeout, tgt=tgt, tgt_type=tgt_type, block=False, **kwargs): if fn_ret and any([show_jid, verbose]): for minion in fn_ret: fn_ret[minion]['jid'] = pub_data['jid'] yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_full_return( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, **kwargs): ''' Execute a salt command and return ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data return (self.get_cli_static_event_returns(pub_data['jid'], pub_data['minions'], timeout, tgt, tgt_type, verbose)) finally: if not was_listening: self.event.close_pub() def get_cli_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_jid=False, **kwargs): ''' Starts a watcher looking at the return data for a specified JID :returns: all of the information for the JID ''' if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] fret = {} # make sure the minions is a set (since we do set operations on it) 
minions = set(minions) found = set() # start this before the cache lookup-- in case new stuff comes in event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) # get the info from the cache ret = self.get_cache_returns(jid) if ret != {}: found.update(set(ret)) yield ret # if you have all the returns, stop if len(found.intersection(minions)) >= len(minions): raise StopIteration() # otherwise, get them from the event system for event in event_iter: if event != {}: found.update(set(event)) yield event if len(found.intersection(minions)) >= len(minions): self._clean_up_subscriptions(jid) raise StopIteration() # TODO: tests!! def get_returns_no_block( self, tag, match_type=None): ''' Raw function to just return events of jid excluding timeout logic Yield either the raw event data or None Pass a list of additional regular expressions as `tags_regex` to search the event bus for non-return data, such as minion lists returned from syndics. ''' while True: raw = self.event.get_event(wait=0.01, tag=tag, match_type=match_type, full=True, no_block=True, auto_reconnect=self.auto_reconnect) yield raw def get_iter_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', expect_minions=False, block=True, **kwargs): ''' Watch the event system and return job data as it comes in :returns: all of the information for the JID ''' if not isinstance(minions, set): if isinstance(minions, six.string_types): minions = set([minions]) elif isinstance(minions, (list, tuple)): minions = set(list(minions)) if timeout is None: timeout = self.opts['timeout'] gather_job_timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) start = int(time.time()) # timeouts per minion, id_ -> timeout time minion_timeouts = {} found = set() missing = set() # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the 
iteration, since the jid is invalid raise StopIteration() except Exception as exc: log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG) # Wait for the hosts to check in last_time = False # iterator for this job's return if self.opts['order_masters']: # If we are a MoM, we need to gather expected minions from downstreams masters. ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex') else: ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid)) # iterator for the info of this job jinfo_iter = [] # open event jids that need to be un-subscribed from later open_jids = set() timeout_at = time.time() + timeout gather_syndic_wait = time.time() + self.opts['syndic_wait'] # are there still minions running the job out there # start as True so that we ping at least once minions_running = True log.debug( 'get_iter_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) while True: # Process events until timeout is reached or all minions have returned for raw in ret_iter: # if we got None, then there were no events if raw is None: break if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) if 'missing' in raw.get('data', {}): missing.update(raw['data']['missing']) continue if 'return' not in raw['data']: continue if kwargs.get('raw', False): found.add(raw['data']['id']) yield raw else: found.add(raw['data']['id']) ret = {raw['data']['id']: {'ret': raw['data']['return']}} if 'out' in raw['data']: ret[raw['data']['id']]['out'] = raw['data']['out'] if 'retcode' in raw['data']: ret[raw['data']['id']]['retcode'] = raw['data']['retcode'] if 'jid' in raw['data']: ret[raw['data']['id']]['jid'] = raw['data']['jid'] if kwargs.get('_cmd_meta', False): ret[raw['data']['id']].update(raw['data']) log.debug('jid %s return from %s', jid, raw['data']['id']) yield ret # if we have all of the returns (and we aren't a syndic), no need for anything fancy if 
len(found.intersection(minions)) >= len(minions) and not self.opts['order_masters']: # All minions have returned, break out of the loop log.debug('jid %s found all minions %s', jid, found) break elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']: if len(found) >= len(minions) and len(minions) > 0 and time.time() > gather_syndic_wait: # There were some minions to find and we found them # However, this does not imply that *all* masters have yet responded with expected minion lists. # Therefore, continue to wait up to the syndic_wait period (calculated in gather_syndic_wait) to see # if additional lower-level masters deliver their lists of expected # minions. break # If we get here we may not have gathered the minion list yet. Keep waiting # for all lower-level masters to respond with their minion lists # let start the timeouts for all remaining minions for id_ in minions - found: # if we have a new minion in the list, make sure it has a timeout if id_ not in minion_timeouts: minion_timeouts[id_] = time.time() + timeout # if the jinfo has timed out and some minions are still running the job # re-do the ping if time.time() > timeout_at and minions_running: # since this is a new ping, no one has responded yet jinfo = self.gather_job_info(jid, list(minions - found), 'list', **kwargs) minions_running = False # if we weren't assigned any jid that means the master thinks # we have nothing to send if 'jid' not in jinfo: jinfo_iter = [] else: jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid'])) timeout_at = time.time() + gather_job_timeout # if you are a syndic, wait a little longer if self.opts['order_masters']: timeout_at += self.opts.get('syndic_wait', 1) # check for minions that are running the job still for raw in jinfo_iter: # if there are no more events, lets stop waiting for the jinfo if raw is None: break try: if raw['data']['retcode'] > 0: log.error('saltutil returning errors on minion %s', raw['data']['id']) 
minions.remove(raw['data']['id']) break except KeyError as exc: # This is a safe pass. We're just using the try/except to # avoid having to deep-check for keys. missing_key = exc.__str__().strip('\'"') if missing_key == 'retcode': log.debug('retcode missing from client return') else: log.debug( 'Passing on saltutil error. Key \'%s\' missing ' 'from client return. This may be an error in ' 'the client.', missing_key ) # Keep track of the jid events to unsubscribe from later open_jids.add(jinfo['jid']) # TODO: move to a library?? if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue if 'syndic' in raw.get('data', {}): minions.update(raw['syndic']) continue if 'return' not in raw.get('data', {}): continue # if the job isn't running there anymore... don't count if raw['data']['return'] == {}: continue # if the minion throws an exception containing the word "return" # the master will try to handle the string as a dict in the next # step. Check if we have a string, log the issue and continue. 
if isinstance(raw['data']['return'], six.string_types): log.error("unexpected return from minion: %s", raw) continue if 'return' in raw['data']['return'] and \ raw['data']['return']['return'] == {}: continue # if we didn't originally target the minion, lets add it to the list if raw['data']['id'] not in minions: minions.add(raw['data']['id']) # update this minion's timeout, as long as the job is still running minion_timeouts[raw['data']['id']] = time.time() + timeout # a minion returned, so we know its running somewhere minions_running = True # if we have hit gather_job_timeout (after firing the job) AND # if we have hit all minion timeouts, lets call it now = time.time() # if we have finished waiting, and no minions are running the job # then we need to see if each minion has timedout done = (now > timeout_at) and not minions_running if done: # if all minions have timeod out for id_ in minions - found: if now < minion_timeouts[id_]: done = False break if done: break # don't spin if block: time.sleep(0.01) else: yield # If there are any remaining open events, clean them up. if open_jids: for jid in open_jids: self.event.unsubscribe(jid) if expect_minions: for minion in list((minions - found)): yield {minion: {'failed': True}} # Filter out any minions marked as missing for which we received # returns (prevents false events sent due to higher-level masters not # knowing about lower-level minions). 
missing -= found # Report on missing minions if missing: for minion in missing: yield {minion: {'failed': True}} def get_returns( self, jid, minions, timeout=None): ''' Get the returns for the command line interface via the event system ''' minions = set(minions) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout log.debug( 'get_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Master job cache returner [{0}] failed to verify jid. ' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: time_left = timeout_at - int(time.time()) wait = max(1, time_left) raw = self.event.get_event(wait, jid, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: found.add(raw['id']) ret[raw['id']] = raw['return'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break if int(time.time()) > timeout_at: log.info( 'jid %s minions %s did not return in time', jid, (minions - found) ) break time.sleep(0.01) return ret def get_full_returns(self, jid, minions, timeout=None): ''' This method starts off a watcher looking at the return data for a specified jid, it returns all of the information for the jid ''' # TODO: change this from ret to return... or the other way. 
# Its inconsistent, we should pick one ret = {} # create the iterator-- since we want to get anyone in the middle event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Returner {0} could not fetch jid data. ' 'Exception details: {1}'.format( self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # if we have all the minion returns, lets just return if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise lets use the listener we created above to get the rest for event_ret in event_iter: # if nothing in the event_ret, skip if event_ret == {}: time.sleep(0.02) continue for minion, m_data in six.iteritems(event_ret): if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # are we done yet? if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise we hit the timeout, return what we have return ret def get_cache_returns(self, jid): ''' Execute a single pass to gather the contents of the job cache ''' ret = {} try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Could not examine master job cache. ' 'Error occurred in {0} returner. 
' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data return ret def get_cli_static_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_timeout=False, show_jid=False): ''' Get the returns for the command line interface via the event system ''' log.trace('entered - function get_cli_static_event_returns()') minions = set(minions) if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Load could not be retrieved from ' 'returner {0}. 
Exception details: {1}'.format( self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: # Process events until timeout is reached or all minions have returned time_left = timeout_at - int(time.time()) # Wait 0 == forever, use a minimum of 1s wait = max(1, time_left) jid_tag = 'salt/job/{0}'.format(jid) raw = self.event.get_event(wait, jid_tag, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue found.add(raw['id']) ret[raw['id']] = {'ret': raw['return']} ret[raw['id']]['success'] = raw.get('success', False) if 'out' in raw: ret[raw['id']]['out'] = raw['out'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break if int(time.time()) > timeout_at: if verbose or show_timeout: if self.opts.get('minion_data_cache', False) \ or tgt_type in ('glob', 'pcre', 'list'): if len(found) < len(minions): fail = sorted(list(minions.difference(found))) for minion in fail: ret[minion] = { 'out': 'no_return', 'ret': 'Minion did not return' } break time.sleep(0.01) self._clean_up_subscriptions(jid) return ret def get_cli_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, progress=False, show_timeout=False, show_jid=False, **kwargs): ''' Get the returns for the command line interface via the event system ''' log.trace('func get_cli_event_returns()') if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) # lazy load the connected minions connected_minions = None return_count = 0 for ret in self.get_iter_returns(jid, minions, timeout=timeout, tgt=tgt, tgt_type=tgt_type, # (gtmanfred) 
expect_minions is popped here incase it is passed from a client # call. If this is not popped, then it would be passed twice to # get_iter_returns. expect_minions=(kwargs.pop('expect_minions', False) or verbose or show_timeout), **kwargs ): log.debug('return event: %s', ret) return_count = return_count + 1 if progress: for id_, min_ret in six.iteritems(ret): if not min_ret.get('failed') is True: yield {'minion_count': len(minions), 'return_count': return_count} # replace the return structure for missing minions for id_, min_ret in six.iteritems(ret): if min_ret.get('failed') is True: if connected_minions is None: connected_minions = salt.utils.minions.CkMinions(self.opts).connected_ids() if self.opts['minion_data_cache'] \ and salt.cache.factory(self.opts).contains('minions/{0}'.format(id_), 'data') \ and connected_minions \ and id_ not in connected_minions: yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [Not connected]', 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: # don't report syndics as unresponsive minions if not os.path.exists(os.path.join(self.opts['syndic_dir'], id_)): yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [No response]' '\nThe minions may not have all finished running and any ' 'remaining minions will return upon completion. To look ' 'up the return data for this job later, run the following ' 'command:\n\n' 'salt-run jobs.lookup_jid {0}'.format(jid), 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: yield {id_: min_ret} self._clean_up_subscriptions(jid) def get_event_iter_returns(self, jid, minions, timeout=None): ''' Gather the return data from the event system, break hard when timeout is reached. 
''' log.trace('entered - function get_event_iter_returns()') if timeout is None: timeout = self.opts['timeout'] timeout_at = time.time() + timeout found = set() # Check to see if the jid is real, if not return the empty dict if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() # Wait for the hosts to check in while True: raw = self.event.get_event(timeout, auto_reconnect=self.auto_reconnect) if raw is None or time.time() > timeout_at: # Timeout reached break if 'minions' in raw.get('data', {}): continue try: found.add(raw['id']) ret = {raw['id']: {'ret': raw['return']}} except KeyError: # Ignore other erroneous messages continue if 'out' in raw: ret[raw['id']]['out'] = raw['out'] yield ret time.sleep(0.02) def _prep_pub(self, tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs): ''' Set up the payload_kwargs to be sent down to the master ''' if tgt_type == 'nodegroup': if tgt not in self.opts['nodegroups']: conf_file = self.opts.get( 'conf_file', 'the master config file' ) raise SaltInvocationError( 'Node group {0} unavailable in {1}'.format( tgt, conf_file ) ) tgt = salt.utils.minions.nodegroup_comp(tgt, self.opts['nodegroups']) tgt_type = 'compound' # Convert a range expression to a list of nodes and change expression # form to list if tgt_type == 'range' and HAS_RANGE: tgt = self._convert_range_to_list(tgt) tgt_type = 'list' # If an external job cache is specified add it to the ret list if self.opts.get('ext_job_cache'): if ret: ret += ',{0}'.format(self.opts['ext_job_cache']) else: ret = self.opts['ext_job_cache'] # format the payload - make a function that does this in the payload # module # Generate the standard keyword args to feed to format_payload payload_kwargs = {'cmd': 'publish', 'tgt': tgt, 'fun': fun, 'arg': arg, 'key': self.key, 'tgt_type': tgt_type, 'ret': ret, 'jid': jid} # if kwargs are passed, pack them. 
if kwargs: payload_kwargs['kwargs'] = kwargs # If we have a salt user, add it to the payload if self.opts['syndic_master'] and 'user' in kwargs: payload_kwargs['user'] = kwargs['user'] elif self.salt_user: payload_kwargs['user'] = self.salt_user # If we're a syndication master, pass the timeout if self.opts['order_masters']: payload_kwargs['to'] = timeout return payload_kwargs def pub(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, listen=False, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.ReqChannel.factory(self.opts, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. 
# If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError as err: log.error(err) raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: return payload self.key = key payload_kwargs['key'] = self.key payload = channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: return payload # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() return {'jid': payload['load']['jid'], 'minions': payload['load']['minions']} @tornado.gen.coroutine def pub_async(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, io_loop=None, listen=True, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. 
fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.AsyncReqChannel.factory(self.opts, io_loop=io_loop, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. # If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = yield channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError: raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! 
Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: raise tornado.gen.Return(payload) self.key = key payload_kwargs['key'] = self.key payload = yield channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: raise tornado.gen.Return(payload) # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() raise tornado.gen.Return({'jid': payload['load']['jid'], 'minions': payload['load']['minions']}) def __del__(self): # This IS really necessary! # When running tests, if self.events is not destroyed, we leak 2 # threads per test case which uses self.client if hasattr(self, 'event'): # The call below will take care of calling 'self.event.destroy()' del self.event def _clean_up_subscriptions(self, job_id): if self.opts.get('order_masters'): self.event.unsubscribe('syndic/.*/{0}'.format(job_id), 'regex') self.event.unsubscribe('salt/job/{0}'.format(job_id))
saltstack/salt
salt/client/__init__.py
LocalClient.cmd_async
python
def cmd_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', kwarg=None, **kwargs): ''' Asynchronously send a command to connected minions The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A job ID or 0 on failure. .. code-block:: python >>> local.cmd_async('*', 'test.sleep', [300]) '20131219215921857715' ''' pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, jid=jid, kwarg=kwarg, listen=False, **kwargs) try: return pub_data['jid'] except KeyError: return 0
Asynchronously send a command to connected minions The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A job ID or 0 on failure. .. code-block:: python >>> local.cmd_async('*', 'test.sleep', [300]) '20131219215921857715'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L411-L446
[ "def run_job(\n self,\n tgt,\n fun,\n arg=(),\n tgt_type='glob',\n ret='',\n timeout=None,\n jid='',\n kwarg=None,\n listen=False,\n **kwargs):\n '''\n Asynchronously send a command to connected minions\n\n Prep the job directory and publish a command to any targeted minions.\n\n :return: A dictionary of (validated) ``pub_data`` or an empty\n dictionary on failure. The ``pub_data`` contains the job ID and a\n list of all minions that are expected to return data.\n\n .. code-block:: python\n\n >>> local.run_job('*', 'test.sleep', [300])\n {'jid': '20131219215650131543', 'minions': ['jerry']}\n '''\n arg = salt.utils.args.parse_input(arg, kwargs=kwarg)\n\n try:\n pub_data = self.pub(\n tgt,\n fun,\n arg,\n tgt_type,\n ret,\n jid=jid,\n timeout=self._get_timeout(timeout),\n listen=listen,\n **kwargs)\n except SaltClientError:\n # Re-raise error with specific message\n raise SaltClientError(\n 'The salt master could not be contacted. Is master running?'\n )\n except AuthenticationError as err:\n raise AuthenticationError(err)\n except AuthorizationError as err:\n raise AuthorizationError(err)\n except Exception as general_exception:\n # Convert to generic client error and pass along message\n raise SaltClientError(general_exception)\n\n return self._check_pub_data(pub_data, listen=listen)\n" ]
class LocalClient(object): ''' The interface used by the :command:`salt` CLI tool on the Salt Master ``LocalClient`` is used to send a command to Salt minions to execute :ref:`execution modules <all-salt.modules>` and return the results to the Salt Master. Importing and using ``LocalClient`` must be done on the same machine as the Salt Master and it must be done using the same user that the Salt Master is running as. (Unless :conf_master:`external_auth` is configured and authentication credentials are included in the execution). .. note:: The LocalClient uses a Tornado IOLoop, this can create issues when using the LocalClient inside an existing IOLoop. If creating the LocalClient in partnership with another IOLoop either create the IOLoop before creating the LocalClient, or when creating the IOLoop use ioloop.current() which will return the ioloop created by LocalClient. .. code-block:: python import salt.client local = salt.client.LocalClient() local.cmd('*', 'test.fib', [10]) ''' def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, 'master'), mopts=None, skip_perm_errors=False, io_loop=None, keep_loop=False, auto_reconnect=False): ''' :param IOLoop io_loop: io_loop used for events. Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. 
''' if mopts: self.opts = mopts else: if os.path.isdir(c_path): log.warning( '%s expects a file path not a directory path(%s) to ' 'its \'c_path\' keyword argument', self.__class__.__name__, c_path ) self.opts = salt.config.client_config(c_path) self.serial = salt.payload.Serial(self.opts) self.salt_user = salt.utils.user.get_specific_user() self.skip_perm_errors = skip_perm_errors self.key = self.__read_master_key() self.auto_reconnect = auto_reconnect self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False, io_loop=io_loop, keep_loop=keep_loop) self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils) self.returners = salt.loader.returners(self.opts, self.functions) def __read_master_key(self): ''' Read in the rotating master authentication key ''' key_user = self.salt_user if key_user == 'root': if self.opts.get('user', 'root') != 'root': key_user = self.opts.get('user', 'root') if key_user.startswith('sudo_'): key_user = self.opts.get('user', 'root') if salt.utils.platform.is_windows(): # The username may contain '\' if it is in Windows # 'DOMAIN\username' format. Fix this for the keyfile path. 
key_user = key_user.replace('\\', '_') keyfile = os.path.join(self.opts['cachedir'], '.{0}_key'.format(key_user)) try: # Make sure all key parent directories are accessible salt.utils.verify.check_path_traversal(self.opts['cachedir'], key_user, self.skip_perm_errors) with salt.utils.files.fopen(keyfile, 'r') as key: return salt.utils.stringutils.to_unicode(key.read()) except (OSError, IOError, SaltClientError): # Fall back to eauth return '' def _convert_range_to_list(self, tgt): ''' convert a seco.range range into a list target ''' range_ = seco.range.Range(self.opts['range_server']) try: return range_.expand(tgt) except seco.range.RangeException as err: print('Range server exception: {0}'.format(err)) return [] def _get_timeout(self, timeout): ''' Return the timeout to use ''' if timeout is None: return self.opts['timeout'] if isinstance(timeout, int): return timeout if isinstance(timeout, six.string_types): try: return int(timeout) except ValueError: return self.opts['timeout'] # Looks like the timeout is invalid, use config return self.opts['timeout'] def gather_job_info(self, jid, tgt, tgt_type, listen=True, **kwargs): ''' Return the information about a given job ''' log.debug('Checking whether jid %s is still running', jid) timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) pub_data = self.run_job(tgt, 'saltutil.find_job', arg=[jid], tgt_type=tgt_type, timeout=timeout, listen=listen, **kwargs ) if 'jid' in pub_data: self.event.subscribe(pub_data['jid']) return pub_data def _check_pub_data(self, pub_data, listen=True): ''' Common checks on the pub_data data structure returned from running pub ''' if pub_data == '': # Failed to authenticate, this could be a bunch of things raise EauthAuthenticationError( 'Failed to authenticate! This is most likely because this ' 'user is not permitted to execute commands, but there is a ' 'small possibility that a disk error occurred (check ' 'disk/inode usage).' 
) # Failed to connect to the master and send the pub if 'error' in pub_data: print(pub_data['error']) log.debug('_check_pub_data() error: %s', pub_data['error']) return {} elif 'jid' not in pub_data: return {} if pub_data['jid'] == '0': print('Failed to connect to the Master, ' 'is the Salt Master running?') return {} # If we order masters (via a syndic), don't short circuit if no minions # are found if not self.opts.get('order_masters'): # Check for no minions if not pub_data['minions']: print('No minions matched the target. ' 'No command was sent, no jid was assigned.') return {} # don't install event subscription listeners when the request is asynchronous # and doesn't care. this is important as it will create event leaks otherwise if not listen: return pub_data if self.opts.get('order_masters'): self.event.subscribe('syndic/.*/{0}'.format(pub_data['jid']), 'regex') self.event.subscribe('salt/job/{0}'.format(pub_data['jid'])) return pub_data def run_job( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=False, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = self.pub( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) return self._check_pub_data(pub_data, listen=listen) def gather_minions(self, tgt, expr_form): _res = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form) return _res['minions'] @tornado.gen.coroutine def run_job_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=True, io_loop=None, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job_async('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = yield self.pub_async( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), io_loop=io_loop, listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) raise tornado.gen.Return(self._check_pub_data(pub_data, listen=listen)) def cmd_subset( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, sub=3, cli=False, progress=False, full_return=False, **kwargs): ''' Execute a command on a random subset of the targeted systems The function signature is the same as :py:meth:`cmd` with the following exceptions. :param sub: The number of systems to execute on :param cli: When this is set to True, a generator is returned, otherwise a dictionary of the minion returns is returned .. code-block:: python >>> SLC.cmd_subset('*', 'test.ping', sub=1) {'jerry': True} ''' minion_ret = self.cmd(tgt, 'sys.list_functions', tgt_type=tgt_type, **kwargs) minions = list(minion_ret) random.shuffle(minions) f_tgt = [] for minion in minions: if fun in minion_ret[minion]: f_tgt.append(minion) if len(f_tgt) >= sub: break func = self.cmd if cli: func = self.cmd_cli return func( f_tgt, fun, arg, tgt_type='list', ret=ret, kwarg=kwarg, progress=progress, full_return=full_return, **kwargs) def cmd_batch( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, batch='10%', **kwargs): ''' Iteratively execute a command on subsets of minions at a time The function signature is the same as :py:meth:`cmd` with the following exceptions. :param batch: The batch identifier of systems to execute on :returns: A generator of minion returns .. code-block:: python >>> returns = local.cmd_batch('*', 'state.highstate', batch='10%') >>> for ret in returns: ... 
print(ret) {'jerry': {...}} {'dave': {...}} {'stewart': {...}} ''' # Late import - not used anywhere else in this file import salt.cli.batch opts = salt.cli.batch.batch_get_opts( tgt, fun, batch, self.opts, arg=arg, tgt_type=tgt_type, ret=ret, kwarg=kwarg, **kwargs) eauth = salt.cli.batch.batch_get_eauth(kwargs) arg = salt.utils.args.parse_input(arg, kwargs=kwarg) opts = {'tgt': tgt, 'fun': fun, 'arg': arg, 'tgt_type': tgt_type, 'ret': ret, 'batch': batch, 'failhard': kwargs.get('failhard', False), 'raw': kwargs.get('raw', False)} if 'timeout' in kwargs: opts['timeout'] = kwargs['timeout'] if 'gather_job_timeout' in kwargs: opts['gather_job_timeout'] = kwargs['gather_job_timeout'] if 'batch_wait' in kwargs: opts['batch_wait'] = int(kwargs['batch_wait']) eauth = {} if 'eauth' in kwargs: eauth['eauth'] = kwargs.pop('eauth') if 'username' in kwargs: eauth['username'] = kwargs.pop('username') if 'password' in kwargs: eauth['password'] = kwargs.pop('password') if 'token' in kwargs: eauth['token'] = kwargs.pop('token') for key, val in six.iteritems(self.opts): if key not in opts: opts[key] = val batch = salt.cli.batch.Batch(opts, eauth=eauth, quiet=True) for ret in batch.run(): yield ret def cmd(self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', jid='', full_return=False, kwarg=None, **kwargs): ''' Synchronously execute a command on targeted minions The cmd method will execute and wait for the timeout period for all minions to reply, then it will return all minion data at once. .. code-block:: python >>> import salt.client >>> local = salt.client.LocalClient() >>> local.cmd('*', 'cmd.run', ['whoami']) {'jerry': 'root'} With extra keyword arguments for the command function to be run: .. code-block:: python local.cmd('*', 'test.arg', ['arg1', 'arg2'], kwarg={'foo': 'bar'}) Compound commands can be used for multiple executions in a single publish. 
Function names and function arguments are provided in separate lists but the index values must correlate and an empty list must be used if no arguments are required. .. code-block:: python >>> local.cmd('*', [ 'grains.items', 'sys.doc', 'cmd.run', ], [ [], [], ['uptime'], ]) :param tgt: Which minions to target for the execution. Default is shell glob. Modified by the ``tgt_type`` option. :type tgt: string or list :param fun: The module and function to call on the specified minions of the form ``module.function``. For example ``test.ping`` or ``grains.items``. Compound commands Multiple functions may be called in a single publish by passing a list of commands. This can dramatically lower overhead and speed up the application communicating with Salt. This requires that the ``arg`` param is a list of lists. The ``fun`` list and the ``arg`` list must correlate by index meaning a function that does not take arguments must still have a corresponding empty list at the expected index. :type fun: string or list of strings :param arg: A list of arguments to pass to the remote function. If the function takes no arguments ``arg`` may be omitted except when executing a compound command. :type arg: list or list-of-lists :param timeout: Seconds to wait after the last minion returns but before all minions return. :param tgt_type: The type of ``tgt``. Allowed values: * ``glob`` - Bash glob completion - Default * ``pcre`` - Perl style regular expression * ``list`` - Python list of hosts * ``grain`` - Match based on a grain comparison * ``grain_pcre`` - Grain comparison with a regex * ``pillar`` - Pillar data comparison * ``pillar_pcre`` - Pillar data comparison with a regex * ``nodegroup`` - Match on nodegroup * ``range`` - Use a Range server for matching * ``compound`` - Pass a compound match string * ``ipcidr`` - Match based on Subnet (CIDR notation) or IPv4 address. .. versionchanged:: 2017.7.0 Renamed from ``expr_form`` to ``tgt_type`` :param ret: The returner to use. 
The value passed can be single returner, or a comma delimited list of returners to call in order on the minions :param kwarg: A dictionary with keyword arguments for the function. :param full_return: Output the job return only (default) or the full return including exit code and other job metadata. :param kwargs: Optional keyword arguments. Authentication credentials may be passed when using :conf_master:`external_auth`. For example: ``local.cmd('*', 'test.ping', username='saltdev', password='saltdev', eauth='pam')``. Or: ``local.cmd('*', 'test.ping', token='5871821ea51754fdcea8153c1c745433')`` :returns: A dictionary with the result of the execution, keyed by minion ID. A compound command will return a sub-dictionary keyed by function name. ''' was_listening = self.event.cpub try: pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, timeout, jid, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data ret = {} for fn_ret in self.get_cli_event_returns( pub_data['jid'], pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, **kwargs): if fn_ret: for mid, data in six.iteritems(fn_ret): ret[mid] = (data if full_return else data.get('ret', {})) for failed in list(set(pub_data['minions']) - set(ret)): ret[failed] = False return ret finally: if not was_listening: self.event.close_pub() def cmd_cli( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, progress=False, **kwargs): ''' Used by the :command:`salt` CLI. This method returns minion returns as they come back and attempts to block until all minions return. The function signature is the same as :py:meth:`cmd` with the following exceptions. 
:param verbose: Print extra information about the running command :returns: A generator ''' was_listening = self.event.cpub if fun.startswith('state.'): ref = {'compound': '-C', 'glob': '', 'grain': '-G', 'grain_pcre': '-P', 'ipcidr': '-S', 'list': '-L', 'nodegroup': '-N', 'pcre': '-E', 'pillar': '-I', 'pillar_pcre': '-J'} if HAS_RANGE: ref['range'] = '-R' if ref[tgt_type].startswith('-'): self.target_data = "{0} '{1}'".format( ref[tgt_type], ','.join(tgt) if isinstance(tgt, list) else tgt) else: self.target_data = ','.join(tgt) if isinstance(tgt, list) else tgt else: self.target_data = '' try: self.pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not self.pub_data: yield self.pub_data else: try: for fn_ret in self.get_cli_event_returns( self.pub_data['jid'], self.pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, verbose, progress, **kwargs): if not fn_ret: continue yield fn_ret except KeyboardInterrupt: exit_msg = ( '\nExiting gracefully on Ctrl-c' '\n' 'This job\'s jid is: {0}\n' 'The minions may not have all finished running and any ' 'remaining minions will return upon completion.\n\n' 'To look up the return data for this job later, run the ' 'following command:\n' 'salt-run jobs.lookup_jid {0}'.format(self.pub_data['jid'])) if self.target_data: exit_msg += ( '\n\n' 'To set up the state run to safely exit, run the following command:\n' 'salt {0} state.soft_kill {1}'.format(self.target_data, self.pub_data['jid'])) raise SystemExit(exit_msg) finally: if not was_listening: self.event.close_pub() def cmd_iter( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, **kwargs): ''' Yields the individual minion returns as they come in The function signature is the same as :py:meth:`cmd` with the following exceptions. Normally :py:meth:`cmd_iter` does not yield results for minions that are not connected. 
If you want it to return results for disconnected minions set `expect_minions=True` in `kwargs`. :return: A generator yielding the individual minion returns .. code-block:: python >>> ret = local.cmd_iter('*', 'test.ping') >>> for i in ret: ... print(i) {'jerry': {'ret': True}} {'dave': {'ret': True}} {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: if kwargs.get('yield_pub_data'): yield pub_data for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=self._get_timeout(timeout), tgt=tgt, tgt_type=tgt_type, **kwargs): if not fn_ret: continue yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_iter_no_block( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, show_jid=False, verbose=False, **kwargs): ''' Yields the individual minion returns as they come in, or None when no returns are available. The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A generator yielding the individual minion returns, or None when no returns are available. This allows for actions to be injected in between minion returns. .. code-block:: python >>> ret = local.cmd_iter_no_block('*', 'test.ping') >>> for i in ret: ... 
print(i) None {'jerry': {'ret': True}} {'dave': {'ret': True}} None {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=timeout, tgt=tgt, tgt_type=tgt_type, block=False, **kwargs): if fn_ret and any([show_jid, verbose]): for minion in fn_ret: fn_ret[minion]['jid'] = pub_data['jid'] yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_full_return( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, **kwargs): ''' Execute a salt command and return ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data return (self.get_cli_static_event_returns(pub_data['jid'], pub_data['minions'], timeout, tgt, tgt_type, verbose)) finally: if not was_listening: self.event.close_pub() def get_cli_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_jid=False, **kwargs): ''' Starts a watcher looking at the return data for a specified JID :returns: all of the information for the JID ''' if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] fret = {} # make sure the minions is a set (since we do set operations on it) minions = set(minions) found = set() # start this before the cache lookup-- in case new stuff comes in event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) # get the info from the cache ret = self.get_cache_returns(jid) if ret != {}: found.update(set(ret)) yield ret # if you have all the returns, stop if 
len(found.intersection(minions)) >= len(minions): raise StopIteration() # otherwise, get them from the event system for event in event_iter: if event != {}: found.update(set(event)) yield event if len(found.intersection(minions)) >= len(minions): self._clean_up_subscriptions(jid) raise StopIteration() # TODO: tests!! def get_returns_no_block( self, tag, match_type=None): ''' Raw function to just return events of jid excluding timeout logic Yield either the raw event data or None Pass a list of additional regular expressions as `tags_regex` to search the event bus for non-return data, such as minion lists returned from syndics. ''' while True: raw = self.event.get_event(wait=0.01, tag=tag, match_type=match_type, full=True, no_block=True, auto_reconnect=self.auto_reconnect) yield raw def get_iter_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', expect_minions=False, block=True, **kwargs): ''' Watch the event system and return job data as it comes in :returns: all of the information for the JID ''' if not isinstance(minions, set): if isinstance(minions, six.string_types): minions = set([minions]) elif isinstance(minions, (list, tuple)): minions = set(list(minions)) if timeout is None: timeout = self.opts['timeout'] gather_job_timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) start = int(time.time()) # timeouts per minion, id_ -> timeout time minion_timeouts = {} found = set() missing = set() # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() except Exception as exc: log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG) # Wait for the hosts to check in last_time = False # iterator for this job's return if self.opts['order_masters']: # If we are a MoM, we need to gather expected 
minions from downstreams masters. ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex') else: ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid)) # iterator for the info of this job jinfo_iter = [] # open event jids that need to be un-subscribed from later open_jids = set() timeout_at = time.time() + timeout gather_syndic_wait = time.time() + self.opts['syndic_wait'] # are there still minions running the job out there # start as True so that we ping at least once minions_running = True log.debug( 'get_iter_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) while True: # Process events until timeout is reached or all minions have returned for raw in ret_iter: # if we got None, then there were no events if raw is None: break if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) if 'missing' in raw.get('data', {}): missing.update(raw['data']['missing']) continue if 'return' not in raw['data']: continue if kwargs.get('raw', False): found.add(raw['data']['id']) yield raw else: found.add(raw['data']['id']) ret = {raw['data']['id']: {'ret': raw['data']['return']}} if 'out' in raw['data']: ret[raw['data']['id']]['out'] = raw['data']['out'] if 'retcode' in raw['data']: ret[raw['data']['id']]['retcode'] = raw['data']['retcode'] if 'jid' in raw['data']: ret[raw['data']['id']]['jid'] = raw['data']['jid'] if kwargs.get('_cmd_meta', False): ret[raw['data']['id']].update(raw['data']) log.debug('jid %s return from %s', jid, raw['data']['id']) yield ret # if we have all of the returns (and we aren't a syndic), no need for anything fancy if len(found.intersection(minions)) >= len(minions) and not self.opts['order_masters']: # All minions have returned, break out of the loop log.debug('jid %s found all minions %s', jid, found) break elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']: if len(found) >= len(minions) and 
len(minions) > 0 and time.time() > gather_syndic_wait: # There were some minions to find and we found them # However, this does not imply that *all* masters have yet responded with expected minion lists. # Therefore, continue to wait up to the syndic_wait period (calculated in gather_syndic_wait) to see # if additional lower-level masters deliver their lists of expected # minions. break # If we get here we may not have gathered the minion list yet. Keep waiting # for all lower-level masters to respond with their minion lists # let start the timeouts for all remaining minions for id_ in minions - found: # if we have a new minion in the list, make sure it has a timeout if id_ not in minion_timeouts: minion_timeouts[id_] = time.time() + timeout # if the jinfo has timed out and some minions are still running the job # re-do the ping if time.time() > timeout_at and minions_running: # since this is a new ping, no one has responded yet jinfo = self.gather_job_info(jid, list(minions - found), 'list', **kwargs) minions_running = False # if we weren't assigned any jid that means the master thinks # we have nothing to send if 'jid' not in jinfo: jinfo_iter = [] else: jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid'])) timeout_at = time.time() + gather_job_timeout # if you are a syndic, wait a little longer if self.opts['order_masters']: timeout_at += self.opts.get('syndic_wait', 1) # check for minions that are running the job still for raw in jinfo_iter: # if there are no more events, lets stop waiting for the jinfo if raw is None: break try: if raw['data']['retcode'] > 0: log.error('saltutil returning errors on minion %s', raw['data']['id']) minions.remove(raw['data']['id']) break except KeyError as exc: # This is a safe pass. We're just using the try/except to # avoid having to deep-check for keys. 
missing_key = exc.__str__().strip('\'"') if missing_key == 'retcode': log.debug('retcode missing from client return') else: log.debug( 'Passing on saltutil error. Key \'%s\' missing ' 'from client return. This may be an error in ' 'the client.', missing_key ) # Keep track of the jid events to unsubscribe from later open_jids.add(jinfo['jid']) # TODO: move to a library?? if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue if 'syndic' in raw.get('data', {}): minions.update(raw['syndic']) continue if 'return' not in raw.get('data', {}): continue # if the job isn't running there anymore... don't count if raw['data']['return'] == {}: continue # if the minion throws an exception containing the word "return" # the master will try to handle the string as a dict in the next # step. Check if we have a string, log the issue and continue. if isinstance(raw['data']['return'], six.string_types): log.error("unexpected return from minion: %s", raw) continue if 'return' in raw['data']['return'] and \ raw['data']['return']['return'] == {}: continue # if we didn't originally target the minion, lets add it to the list if raw['data']['id'] not in minions: minions.add(raw['data']['id']) # update this minion's timeout, as long as the job is still running minion_timeouts[raw['data']['id']] = time.time() + timeout # a minion returned, so we know its running somewhere minions_running = True # if we have hit gather_job_timeout (after firing the job) AND # if we have hit all minion timeouts, lets call it now = time.time() # if we have finished waiting, and no minions are running the job # then we need to see if each minion has timedout done = (now > timeout_at) and not minions_running if done: # if all minions have timeod out for id_ in minions - found: if now < minion_timeouts[id_]: done = False break if done: break # don't spin if block: time.sleep(0.01) else: yield # If there are any remaining open events, clean them up. 
if open_jids: for jid in open_jids: self.event.unsubscribe(jid) if expect_minions: for minion in list((minions - found)): yield {minion: {'failed': True}} # Filter out any minions marked as missing for which we received # returns (prevents false events sent due to higher-level masters not # knowing about lower-level minions). missing -= found # Report on missing minions if missing: for minion in missing: yield {minion: {'failed': True}} def get_returns( self, jid, minions, timeout=None): ''' Get the returns for the command line interface via the event system ''' minions = set(minions) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout log.debug( 'get_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Master job cache returner [{0}] failed to verify jid. 
' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: time_left = timeout_at - int(time.time()) wait = max(1, time_left) raw = self.event.get_event(wait, jid, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: found.add(raw['id']) ret[raw['id']] = raw['return'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break if int(time.time()) > timeout_at: log.info( 'jid %s minions %s did not return in time', jid, (minions - found) ) break time.sleep(0.01) return ret def get_full_returns(self, jid, minions, timeout=None): ''' This method starts off a watcher looking at the return data for a specified jid, it returns all of the information for the jid ''' # TODO: change this from ret to return... or the other way. # Its inconsistent, we should pick one ret = {} # create the iterator-- since we want to get anyone in the middle event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Returner {0} could not fetch jid data. 
' 'Exception details: {1}'.format( self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # if we have all the minion returns, lets just return if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise lets use the listener we created above to get the rest for event_ret in event_iter: # if nothing in the event_ret, skip if event_ret == {}: time.sleep(0.02) continue for minion, m_data in six.iteritems(event_ret): if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # are we done yet? if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise we hit the timeout, return what we have return ret def get_cache_returns(self, jid): ''' Execute a single pass to gather the contents of the job cache ''' ret = {} try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Could not examine master job cache. ' 'Error occurred in {0} returner. 
' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data return ret def get_cli_static_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_timeout=False, show_jid=False): ''' Get the returns for the command line interface via the event system ''' log.trace('entered - function get_cli_static_event_returns()') minions = set(minions) if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Load could not be retrieved from ' 'returner {0}. 
Exception details: {1}'.format( self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: # Process events until timeout is reached or all minions have returned time_left = timeout_at - int(time.time()) # Wait 0 == forever, use a minimum of 1s wait = max(1, time_left) jid_tag = 'salt/job/{0}'.format(jid) raw = self.event.get_event(wait, jid_tag, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue found.add(raw['id']) ret[raw['id']] = {'ret': raw['return']} ret[raw['id']]['success'] = raw.get('success', False) if 'out' in raw: ret[raw['id']]['out'] = raw['out'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break if int(time.time()) > timeout_at: if verbose or show_timeout: if self.opts.get('minion_data_cache', False) \ or tgt_type in ('glob', 'pcre', 'list'): if len(found) < len(minions): fail = sorted(list(minions.difference(found))) for minion in fail: ret[minion] = { 'out': 'no_return', 'ret': 'Minion did not return' } break time.sleep(0.01) self._clean_up_subscriptions(jid) return ret def get_cli_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, progress=False, show_timeout=False, show_jid=False, **kwargs): ''' Get the returns for the command line interface via the event system ''' log.trace('func get_cli_event_returns()') if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) # lazy load the connected minions connected_minions = None return_count = 0 for ret in self.get_iter_returns(jid, minions, timeout=timeout, tgt=tgt, tgt_type=tgt_type, # (gtmanfred) 
expect_minions is popped here incase it is passed from a client # call. If this is not popped, then it would be passed twice to # get_iter_returns. expect_minions=(kwargs.pop('expect_minions', False) or verbose or show_timeout), **kwargs ): log.debug('return event: %s', ret) return_count = return_count + 1 if progress: for id_, min_ret in six.iteritems(ret): if not min_ret.get('failed') is True: yield {'minion_count': len(minions), 'return_count': return_count} # replace the return structure for missing minions for id_, min_ret in six.iteritems(ret): if min_ret.get('failed') is True: if connected_minions is None: connected_minions = salt.utils.minions.CkMinions(self.opts).connected_ids() if self.opts['minion_data_cache'] \ and salt.cache.factory(self.opts).contains('minions/{0}'.format(id_), 'data') \ and connected_minions \ and id_ not in connected_minions: yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [Not connected]', 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: # don't report syndics as unresponsive minions if not os.path.exists(os.path.join(self.opts['syndic_dir'], id_)): yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [No response]' '\nThe minions may not have all finished running and any ' 'remaining minions will return upon completion. To look ' 'up the return data for this job later, run the following ' 'command:\n\n' 'salt-run jobs.lookup_jid {0}'.format(jid), 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: yield {id_: min_ret} self._clean_up_subscriptions(jid) def get_event_iter_returns(self, jid, minions, timeout=None): ''' Gather the return data from the event system, break hard when timeout is reached. 
''' log.trace('entered - function get_event_iter_returns()') if timeout is None: timeout = self.opts['timeout'] timeout_at = time.time() + timeout found = set() # Check to see if the jid is real, if not return the empty dict if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() # Wait for the hosts to check in while True: raw = self.event.get_event(timeout, auto_reconnect=self.auto_reconnect) if raw is None or time.time() > timeout_at: # Timeout reached break if 'minions' in raw.get('data', {}): continue try: found.add(raw['id']) ret = {raw['id']: {'ret': raw['return']}} except KeyError: # Ignore other erroneous messages continue if 'out' in raw: ret[raw['id']]['out'] = raw['out'] yield ret time.sleep(0.02) def _prep_pub(self, tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs): ''' Set up the payload_kwargs to be sent down to the master ''' if tgt_type == 'nodegroup': if tgt not in self.opts['nodegroups']: conf_file = self.opts.get( 'conf_file', 'the master config file' ) raise SaltInvocationError( 'Node group {0} unavailable in {1}'.format( tgt, conf_file ) ) tgt = salt.utils.minions.nodegroup_comp(tgt, self.opts['nodegroups']) tgt_type = 'compound' # Convert a range expression to a list of nodes and change expression # form to list if tgt_type == 'range' and HAS_RANGE: tgt = self._convert_range_to_list(tgt) tgt_type = 'list' # If an external job cache is specified add it to the ret list if self.opts.get('ext_job_cache'): if ret: ret += ',{0}'.format(self.opts['ext_job_cache']) else: ret = self.opts['ext_job_cache'] # format the payload - make a function that does this in the payload # module # Generate the standard keyword args to feed to format_payload payload_kwargs = {'cmd': 'publish', 'tgt': tgt, 'fun': fun, 'arg': arg, 'key': self.key, 'tgt_type': tgt_type, 'ret': ret, 'jid': jid} # if kwargs are passed, pack them. 
if kwargs: payload_kwargs['kwargs'] = kwargs # If we have a salt user, add it to the payload if self.opts['syndic_master'] and 'user' in kwargs: payload_kwargs['user'] = kwargs['user'] elif self.salt_user: payload_kwargs['user'] = self.salt_user # If we're a syndication master, pass the timeout if self.opts['order_masters']: payload_kwargs['to'] = timeout return payload_kwargs def pub(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, listen=False, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.ReqChannel.factory(self.opts, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. 
# If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError as err: log.error(err) raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: return payload self.key = key payload_kwargs['key'] = self.key payload = channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: return payload # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() return {'jid': payload['load']['jid'], 'minions': payload['load']['minions']} @tornado.gen.coroutine def pub_async(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, io_loop=None, listen=True, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. 
fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.AsyncReqChannel.factory(self.opts, io_loop=io_loop, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. # If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = yield channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError: raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! 
Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: raise tornado.gen.Return(payload) self.key = key payload_kwargs['key'] = self.key payload = yield channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: raise tornado.gen.Return(payload) # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() raise tornado.gen.Return({'jid': payload['load']['jid'], 'minions': payload['load']['minions']}) def __del__(self): # This IS really necessary! # When running tests, if self.events is not destroyed, we leak 2 # threads per test case which uses self.client if hasattr(self, 'event'): # The call below will take care of calling 'self.event.destroy()' del self.event def _clean_up_subscriptions(self, job_id): if self.opts.get('order_masters'): self.event.unsubscribe('syndic/.*/{0}'.format(job_id), 'regex') self.event.unsubscribe('salt/job/{0}'.format(job_id))
saltstack/salt
salt/client/__init__.py
LocalClient.cmd_subset
python
def cmd_subset(
        self,
        tgt,
        fun,
        arg=(),
        tgt_type='glob',
        ret='',
        kwarg=None,
        sub=3,
        cli=False,
        progress=False,
        full_return=False,
        **kwargs):
    '''
    Execute a command on a random subset of the targeted systems

    The function signature is the same as :py:meth:`cmd` with the
    following exceptions.

    :param sub: The number of systems to execute on
    :param cli: When this is set to True, a generator is returned,
                otherwise a dictionary of the minion returns is returned

    .. code-block:: python

        >>> SLC.cmd_subset('*', 'test.ping', sub=1)
        {'jerry': True}
    '''
    # Ask every matched minion which functions it has loaded so we only
    # pick subset members that can actually run ``fun``.
    minion_ret = self.cmd(tgt,
                          'sys.list_functions',
                          tgt_type=tgt_type,
                          **kwargs)
    minions = list(minion_ret)
    # Shuffle so the chosen subset is random rather than biased toward
    # whichever minions happen to be listed first.
    random.shuffle(minions)
    f_tgt = []
    for minion in minions:
        # cmd() marks minions that failed to return as ``False`` rather
        # than a list of functions; guard against non-iterable values so
        # the membership test cannot raise a TypeError.
        functions = minion_ret[minion]
        if isinstance(functions, (list, tuple)) and fun in functions:
            f_tgt.append(minion)
        if len(f_tgt) >= sub:
            break
    func = self.cmd
    if cli:
        func = self.cmd_cli
    # Re-publish the real command only to the selected minions, using an
    # explicit list target so no other minion matches.
    return func(
        f_tgt,
        fun,
        arg,
        tgt_type='list',
        ret=ret,
        kwarg=kwarg,
        progress=progress,
        full_return=full_return,
        **kwargs)
Execute a command on a random subset of the targeted systems The function signature is the same as :py:meth:`cmd` with the following exceptions. :param sub: The number of systems to execute on :param cli: When this is set to True, a generator is returned, otherwise a dictionary of the minion returns is returned .. code-block:: python >>> SLC.cmd_subset('*', 'test.ping', sub=1) {'jerry': True}
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L448-L500
[ "def cmd(self,\n tgt,\n fun,\n arg=(),\n timeout=None,\n tgt_type='glob',\n ret='',\n jid='',\n full_return=False,\n kwarg=None,\n **kwargs):\n '''\n Synchronously execute a command on targeted minions\n\n The cmd method will execute and wait for the timeout period for all\n minions to reply, then it will return all minion data at once.\n\n .. code-block:: python\n\n >>> import salt.client\n >>> local = salt.client.LocalClient()\n >>> local.cmd('*', 'cmd.run', ['whoami'])\n {'jerry': 'root'}\n\n With extra keyword arguments for the command function to be run:\n\n .. code-block:: python\n\n local.cmd('*', 'test.arg', ['arg1', 'arg2'], kwarg={'foo': 'bar'})\n\n Compound commands can be used for multiple executions in a single\n publish. Function names and function arguments are provided in separate\n lists but the index values must correlate and an empty list must be\n used if no arguments are required.\n\n .. code-block:: python\n\n >>> local.cmd('*', [\n 'grains.items',\n 'sys.doc',\n 'cmd.run',\n ],\n [\n [],\n [],\n ['uptime'],\n ])\n\n :param tgt: Which minions to target for the execution. Default is shell\n glob. Modified by the ``tgt_type`` option.\n :type tgt: string or list\n\n :param fun: The module and function to call on the specified minions of\n the form ``module.function``. For example ``test.ping`` or\n ``grains.items``.\n\n Compound commands\n Multiple functions may be called in a single publish by\n passing a list of commands. This can dramatically lower\n overhead and speed up the application communicating with Salt.\n\n This requires that the ``arg`` param is a list of lists. The\n ``fun`` list and the ``arg`` list must correlate by index\n meaning a function that does not take arguments must still have\n a corresponding empty list at the expected index.\n :type fun: string or list of strings\n\n :param arg: A list of arguments to pass to the remote function. 
If the\n function takes no arguments ``arg`` may be omitted except when\n executing a compound command.\n :type arg: list or list-of-lists\n\n :param timeout: Seconds to wait after the last minion returns but\n before all minions return.\n\n :param tgt_type: The type of ``tgt``. Allowed values:\n\n * ``glob`` - Bash glob completion - Default\n * ``pcre`` - Perl style regular expression\n * ``list`` - Python list of hosts\n * ``grain`` - Match based on a grain comparison\n * ``grain_pcre`` - Grain comparison with a regex\n * ``pillar`` - Pillar data comparison\n * ``pillar_pcre`` - Pillar data comparison with a regex\n * ``nodegroup`` - Match on nodegroup\n * ``range`` - Use a Range server for matching\n * ``compound`` - Pass a compound match string\n * ``ipcidr`` - Match based on Subnet (CIDR notation) or IPv4 address.\n\n .. versionchanged:: 2017.7.0\n Renamed from ``expr_form`` to ``tgt_type``\n\n :param ret: The returner to use. The value passed can be single\n returner, or a comma delimited list of returners to call in order\n on the minions\n\n :param kwarg: A dictionary with keyword arguments for the function.\n\n :param full_return: Output the job return only (default) or the full\n return including exit code and other job metadata.\n\n :param kwargs: Optional keyword arguments.\n Authentication credentials may be passed when using\n :conf_master:`external_auth`.\n\n For example: ``local.cmd('*', 'test.ping', username='saltdev',\n password='saltdev', eauth='pam')``.\n Or: ``local.cmd('*', 'test.ping',\n token='5871821ea51754fdcea8153c1c745433')``\n\n :returns: A dictionary with the result of the execution, keyed by\n minion ID. 
A compound command will return a sub-dictionary keyed by\n function name.\n '''\n was_listening = self.event.cpub\n\n try:\n pub_data = self.run_job(tgt,\n fun,\n arg,\n tgt_type,\n ret,\n timeout,\n jid,\n kwarg=kwarg,\n listen=True,\n **kwargs)\n\n if not pub_data:\n return pub_data\n\n ret = {}\n for fn_ret in self.get_cli_event_returns(\n pub_data['jid'],\n pub_data['minions'],\n self._get_timeout(timeout),\n tgt,\n tgt_type,\n **kwargs):\n\n if fn_ret:\n for mid, data in six.iteritems(fn_ret):\n ret[mid] = (data if full_return\n else data.get('ret', {}))\n\n for failed in list(set(pub_data['minions']) - set(ret)):\n ret[failed] = False\n return ret\n finally:\n if not was_listening:\n self.event.close_pub()\n", "def cmd_cli(\n self,\n tgt,\n fun,\n arg=(),\n timeout=None,\n tgt_type='glob',\n ret='',\n verbose=False,\n kwarg=None,\n progress=False,\n **kwargs):\n '''\n Used by the :command:`salt` CLI. This method returns minion returns as\n they come back and attempts to block until all minions return.\n\n The function signature is the same as :py:meth:`cmd` with the\n following exceptions.\n\n :param verbose: Print extra information about the running command\n :returns: A generator\n '''\n was_listening = self.event.cpub\n\n if fun.startswith('state.'):\n ref = {'compound': '-C',\n 'glob': '',\n 'grain': '-G',\n 'grain_pcre': '-P',\n 'ipcidr': '-S',\n 'list': '-L',\n 'nodegroup': '-N',\n 'pcre': '-E',\n 'pillar': '-I',\n 'pillar_pcre': '-J'}\n if HAS_RANGE:\n ref['range'] = '-R'\n if ref[tgt_type].startswith('-'):\n self.target_data = \"{0} '{1}'\".format(\n ref[tgt_type],\n ','.join(tgt) if isinstance(tgt, list) else tgt)\n else:\n self.target_data = ','.join(tgt) if isinstance(tgt, list) else tgt\n else:\n self.target_data = ''\n\n try:\n self.pub_data = self.run_job(\n tgt,\n fun,\n arg,\n tgt_type,\n ret,\n timeout,\n kwarg=kwarg,\n listen=True,\n **kwargs)\n\n if not self.pub_data:\n yield self.pub_data\n else:\n try:\n for fn_ret in 
self.get_cli_event_returns(\n self.pub_data['jid'],\n self.pub_data['minions'],\n self._get_timeout(timeout),\n tgt,\n tgt_type,\n verbose,\n progress,\n **kwargs):\n\n if not fn_ret:\n continue\n\n yield fn_ret\n except KeyboardInterrupt:\n exit_msg = (\n '\\nExiting gracefully on Ctrl-c'\n '\\n'\n 'This job\\'s jid is: {0}\\n'\n 'The minions may not have all finished running and any '\n 'remaining minions will return upon completion.\\n\\n'\n 'To look up the return data for this job later, run the '\n 'following command:\\n'\n 'salt-run jobs.lookup_jid {0}'.format(self.pub_data['jid']))\n if self.target_data:\n exit_msg += (\n '\\n\\n'\n 'To set up the state run to safely exit, run the following command:\\n'\n 'salt {0} state.soft_kill {1}'.format(self.target_data, self.pub_data['jid']))\n raise SystemExit(exit_msg)\n finally:\n if not was_listening:\n self.event.close_pub()\n" ]
class LocalClient(object): ''' The interface used by the :command:`salt` CLI tool on the Salt Master ``LocalClient`` is used to send a command to Salt minions to execute :ref:`execution modules <all-salt.modules>` and return the results to the Salt Master. Importing and using ``LocalClient`` must be done on the same machine as the Salt Master and it must be done using the same user that the Salt Master is running as. (Unless :conf_master:`external_auth` is configured and authentication credentials are included in the execution). .. note:: The LocalClient uses a Tornado IOLoop, this can create issues when using the LocalClient inside an existing IOLoop. If creating the LocalClient in partnership with another IOLoop either create the IOLoop before creating the LocalClient, or when creating the IOLoop use ioloop.current() which will return the ioloop created by LocalClient. .. code-block:: python import salt.client local = salt.client.LocalClient() local.cmd('*', 'test.fib', [10]) ''' def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, 'master'), mopts=None, skip_perm_errors=False, io_loop=None, keep_loop=False, auto_reconnect=False): ''' :param IOLoop io_loop: io_loop used for events. Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. 
''' if mopts: self.opts = mopts else: if os.path.isdir(c_path): log.warning( '%s expects a file path not a directory path(%s) to ' 'its \'c_path\' keyword argument', self.__class__.__name__, c_path ) self.opts = salt.config.client_config(c_path) self.serial = salt.payload.Serial(self.opts) self.salt_user = salt.utils.user.get_specific_user() self.skip_perm_errors = skip_perm_errors self.key = self.__read_master_key() self.auto_reconnect = auto_reconnect self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False, io_loop=io_loop, keep_loop=keep_loop) self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils) self.returners = salt.loader.returners(self.opts, self.functions) def __read_master_key(self): ''' Read in the rotating master authentication key ''' key_user = self.salt_user if key_user == 'root': if self.opts.get('user', 'root') != 'root': key_user = self.opts.get('user', 'root') if key_user.startswith('sudo_'): key_user = self.opts.get('user', 'root') if salt.utils.platform.is_windows(): # The username may contain '\' if it is in Windows # 'DOMAIN\username' format. Fix this for the keyfile path. 
key_user = key_user.replace('\\', '_') keyfile = os.path.join(self.opts['cachedir'], '.{0}_key'.format(key_user)) try: # Make sure all key parent directories are accessible salt.utils.verify.check_path_traversal(self.opts['cachedir'], key_user, self.skip_perm_errors) with salt.utils.files.fopen(keyfile, 'r') as key: return salt.utils.stringutils.to_unicode(key.read()) except (OSError, IOError, SaltClientError): # Fall back to eauth return '' def _convert_range_to_list(self, tgt): ''' convert a seco.range range into a list target ''' range_ = seco.range.Range(self.opts['range_server']) try: return range_.expand(tgt) except seco.range.RangeException as err: print('Range server exception: {0}'.format(err)) return [] def _get_timeout(self, timeout): ''' Return the timeout to use ''' if timeout is None: return self.opts['timeout'] if isinstance(timeout, int): return timeout if isinstance(timeout, six.string_types): try: return int(timeout) except ValueError: return self.opts['timeout'] # Looks like the timeout is invalid, use config return self.opts['timeout'] def gather_job_info(self, jid, tgt, tgt_type, listen=True, **kwargs): ''' Return the information about a given job ''' log.debug('Checking whether jid %s is still running', jid) timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) pub_data = self.run_job(tgt, 'saltutil.find_job', arg=[jid], tgt_type=tgt_type, timeout=timeout, listen=listen, **kwargs ) if 'jid' in pub_data: self.event.subscribe(pub_data['jid']) return pub_data def _check_pub_data(self, pub_data, listen=True): ''' Common checks on the pub_data data structure returned from running pub ''' if pub_data == '': # Failed to authenticate, this could be a bunch of things raise EauthAuthenticationError( 'Failed to authenticate! This is most likely because this ' 'user is not permitted to execute commands, but there is a ' 'small possibility that a disk error occurred (check ' 'disk/inode usage).' 
) # Failed to connect to the master and send the pub if 'error' in pub_data: print(pub_data['error']) log.debug('_check_pub_data() error: %s', pub_data['error']) return {} elif 'jid' not in pub_data: return {} if pub_data['jid'] == '0': print('Failed to connect to the Master, ' 'is the Salt Master running?') return {} # If we order masters (via a syndic), don't short circuit if no minions # are found if not self.opts.get('order_masters'): # Check for no minions if not pub_data['minions']: print('No minions matched the target. ' 'No command was sent, no jid was assigned.') return {} # don't install event subscription listeners when the request is asynchronous # and doesn't care. this is important as it will create event leaks otherwise if not listen: return pub_data if self.opts.get('order_masters'): self.event.subscribe('syndic/.*/{0}'.format(pub_data['jid']), 'regex') self.event.subscribe('salt/job/{0}'.format(pub_data['jid'])) return pub_data def run_job( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=False, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = self.pub( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) return self._check_pub_data(pub_data, listen=listen) def gather_minions(self, tgt, expr_form): _res = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form) return _res['minions'] @tornado.gen.coroutine def run_job_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=True, io_loop=None, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job_async('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = yield self.pub_async( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), io_loop=io_loop, listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) raise tornado.gen.Return(self._check_pub_data(pub_data, listen=listen)) def cmd_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', kwarg=None, **kwargs): ''' Asynchronously send a command to connected minions The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A job ID or 0 on failure. .. code-block:: python >>> local.cmd_async('*', 'test.sleep', [300]) '20131219215921857715' ''' pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, jid=jid, kwarg=kwarg, listen=False, **kwargs) try: return pub_data['jid'] except KeyError: return 0 def cmd_batch( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, batch='10%', **kwargs): ''' Iteratively execute a command on subsets of minions at a time The function signature is the same as :py:meth:`cmd` with the following exceptions. :param batch: The batch identifier of systems to execute on :returns: A generator of minion returns .. code-block:: python >>> returns = local.cmd_batch('*', 'state.highstate', batch='10%') >>> for ret in returns: ... 
print(ret) {'jerry': {...}} {'dave': {...}} {'stewart': {...}} ''' # Late import - not used anywhere else in this file import salt.cli.batch opts = salt.cli.batch.batch_get_opts( tgt, fun, batch, self.opts, arg=arg, tgt_type=tgt_type, ret=ret, kwarg=kwarg, **kwargs) eauth = salt.cli.batch.batch_get_eauth(kwargs) arg = salt.utils.args.parse_input(arg, kwargs=kwarg) opts = {'tgt': tgt, 'fun': fun, 'arg': arg, 'tgt_type': tgt_type, 'ret': ret, 'batch': batch, 'failhard': kwargs.get('failhard', False), 'raw': kwargs.get('raw', False)} if 'timeout' in kwargs: opts['timeout'] = kwargs['timeout'] if 'gather_job_timeout' in kwargs: opts['gather_job_timeout'] = kwargs['gather_job_timeout'] if 'batch_wait' in kwargs: opts['batch_wait'] = int(kwargs['batch_wait']) eauth = {} if 'eauth' in kwargs: eauth['eauth'] = kwargs.pop('eauth') if 'username' in kwargs: eauth['username'] = kwargs.pop('username') if 'password' in kwargs: eauth['password'] = kwargs.pop('password') if 'token' in kwargs: eauth['token'] = kwargs.pop('token') for key, val in six.iteritems(self.opts): if key not in opts: opts[key] = val batch = salt.cli.batch.Batch(opts, eauth=eauth, quiet=True) for ret in batch.run(): yield ret def cmd(self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', jid='', full_return=False, kwarg=None, **kwargs): ''' Synchronously execute a command on targeted minions The cmd method will execute and wait for the timeout period for all minions to reply, then it will return all minion data at once. .. code-block:: python >>> import salt.client >>> local = salt.client.LocalClient() >>> local.cmd('*', 'cmd.run', ['whoami']) {'jerry': 'root'} With extra keyword arguments for the command function to be run: .. code-block:: python local.cmd('*', 'test.arg', ['arg1', 'arg2'], kwarg={'foo': 'bar'}) Compound commands can be used for multiple executions in a single publish. 
Function names and function arguments are provided in separate lists but the index values must correlate and an empty list must be used if no arguments are required. .. code-block:: python >>> local.cmd('*', [ 'grains.items', 'sys.doc', 'cmd.run', ], [ [], [], ['uptime'], ]) :param tgt: Which minions to target for the execution. Default is shell glob. Modified by the ``tgt_type`` option. :type tgt: string or list :param fun: The module and function to call on the specified minions of the form ``module.function``. For example ``test.ping`` or ``grains.items``. Compound commands Multiple functions may be called in a single publish by passing a list of commands. This can dramatically lower overhead and speed up the application communicating with Salt. This requires that the ``arg`` param is a list of lists. The ``fun`` list and the ``arg`` list must correlate by index meaning a function that does not take arguments must still have a corresponding empty list at the expected index. :type fun: string or list of strings :param arg: A list of arguments to pass to the remote function. If the function takes no arguments ``arg`` may be omitted except when executing a compound command. :type arg: list or list-of-lists :param timeout: Seconds to wait after the last minion returns but before all minions return. :param tgt_type: The type of ``tgt``. Allowed values: * ``glob`` - Bash glob completion - Default * ``pcre`` - Perl style regular expression * ``list`` - Python list of hosts * ``grain`` - Match based on a grain comparison * ``grain_pcre`` - Grain comparison with a regex * ``pillar`` - Pillar data comparison * ``pillar_pcre`` - Pillar data comparison with a regex * ``nodegroup`` - Match on nodegroup * ``range`` - Use a Range server for matching * ``compound`` - Pass a compound match string * ``ipcidr`` - Match based on Subnet (CIDR notation) or IPv4 address. .. versionchanged:: 2017.7.0 Renamed from ``expr_form`` to ``tgt_type`` :param ret: The returner to use. 
The value passed can be single returner, or a comma delimited list of returners to call in order on the minions :param kwarg: A dictionary with keyword arguments for the function. :param full_return: Output the job return only (default) or the full return including exit code and other job metadata. :param kwargs: Optional keyword arguments. Authentication credentials may be passed when using :conf_master:`external_auth`. For example: ``local.cmd('*', 'test.ping', username='saltdev', password='saltdev', eauth='pam')``. Or: ``local.cmd('*', 'test.ping', token='5871821ea51754fdcea8153c1c745433')`` :returns: A dictionary with the result of the execution, keyed by minion ID. A compound command will return a sub-dictionary keyed by function name. ''' was_listening = self.event.cpub try: pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, timeout, jid, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data ret = {} for fn_ret in self.get_cli_event_returns( pub_data['jid'], pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, **kwargs): if fn_ret: for mid, data in six.iteritems(fn_ret): ret[mid] = (data if full_return else data.get('ret', {})) for failed in list(set(pub_data['minions']) - set(ret)): ret[failed] = False return ret finally: if not was_listening: self.event.close_pub() def cmd_cli( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, progress=False, **kwargs): ''' Used by the :command:`salt` CLI. This method returns minion returns as they come back and attempts to block until all minions return. The function signature is the same as :py:meth:`cmd` with the following exceptions. 
:param verbose: Print extra information about the running command :returns: A generator ''' was_listening = self.event.cpub if fun.startswith('state.'): ref = {'compound': '-C', 'glob': '', 'grain': '-G', 'grain_pcre': '-P', 'ipcidr': '-S', 'list': '-L', 'nodegroup': '-N', 'pcre': '-E', 'pillar': '-I', 'pillar_pcre': '-J'} if HAS_RANGE: ref['range'] = '-R' if ref[tgt_type].startswith('-'): self.target_data = "{0} '{1}'".format( ref[tgt_type], ','.join(tgt) if isinstance(tgt, list) else tgt) else: self.target_data = ','.join(tgt) if isinstance(tgt, list) else tgt else: self.target_data = '' try: self.pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not self.pub_data: yield self.pub_data else: try: for fn_ret in self.get_cli_event_returns( self.pub_data['jid'], self.pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, verbose, progress, **kwargs): if not fn_ret: continue yield fn_ret except KeyboardInterrupt: exit_msg = ( '\nExiting gracefully on Ctrl-c' '\n' 'This job\'s jid is: {0}\n' 'The minions may not have all finished running and any ' 'remaining minions will return upon completion.\n\n' 'To look up the return data for this job later, run the ' 'following command:\n' 'salt-run jobs.lookup_jid {0}'.format(self.pub_data['jid'])) if self.target_data: exit_msg += ( '\n\n' 'To set up the state run to safely exit, run the following command:\n' 'salt {0} state.soft_kill {1}'.format(self.target_data, self.pub_data['jid'])) raise SystemExit(exit_msg) finally: if not was_listening: self.event.close_pub() def cmd_iter( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, **kwargs): ''' Yields the individual minion returns as they come in The function signature is the same as :py:meth:`cmd` with the following exceptions. Normally :py:meth:`cmd_iter` does not yield results for minions that are not connected. 
If you want it to return results for disconnected minions set `expect_minions=True` in `kwargs`. :return: A generator yielding the individual minion returns .. code-block:: python >>> ret = local.cmd_iter('*', 'test.ping') >>> for i in ret: ... print(i) {'jerry': {'ret': True}} {'dave': {'ret': True}} {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: if kwargs.get('yield_pub_data'): yield pub_data for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=self._get_timeout(timeout), tgt=tgt, tgt_type=tgt_type, **kwargs): if not fn_ret: continue yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_iter_no_block( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, show_jid=False, verbose=False, **kwargs): ''' Yields the individual minion returns as they come in, or None when no returns are available. The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A generator yielding the individual minion returns, or None when no returns are available. This allows for actions to be injected in between minion returns. .. code-block:: python >>> ret = local.cmd_iter_no_block('*', 'test.ping') >>> for i in ret: ... 
print(i) None {'jerry': {'ret': True}} {'dave': {'ret': True}} None {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=timeout, tgt=tgt, tgt_type=tgt_type, block=False, **kwargs): if fn_ret and any([show_jid, verbose]): for minion in fn_ret: fn_ret[minion]['jid'] = pub_data['jid'] yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_full_return( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, **kwargs): ''' Execute a salt command and return ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data return (self.get_cli_static_event_returns(pub_data['jid'], pub_data['minions'], timeout, tgt, tgt_type, verbose)) finally: if not was_listening: self.event.close_pub() def get_cli_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_jid=False, **kwargs): ''' Starts a watcher looking at the return data for a specified JID :returns: all of the information for the JID ''' if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] fret = {} # make sure the minions is a set (since we do set operations on it) minions = set(minions) found = set() # start this before the cache lookup-- in case new stuff comes in event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) # get the info from the cache ret = self.get_cache_returns(jid) if ret != {}: found.update(set(ret)) yield ret # if you have all the returns, stop if 
len(found.intersection(minions)) >= len(minions): raise StopIteration() # otherwise, get them from the event system for event in event_iter: if event != {}: found.update(set(event)) yield event if len(found.intersection(minions)) >= len(minions): self._clean_up_subscriptions(jid) raise StopIteration() # TODO: tests!! def get_returns_no_block( self, tag, match_type=None): ''' Raw function to just return events of jid excluding timeout logic Yield either the raw event data or None Pass a list of additional regular expressions as `tags_regex` to search the event bus for non-return data, such as minion lists returned from syndics. ''' while True: raw = self.event.get_event(wait=0.01, tag=tag, match_type=match_type, full=True, no_block=True, auto_reconnect=self.auto_reconnect) yield raw def get_iter_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', expect_minions=False, block=True, **kwargs): ''' Watch the event system and return job data as it comes in :returns: all of the information for the JID ''' if not isinstance(minions, set): if isinstance(minions, six.string_types): minions = set([minions]) elif isinstance(minions, (list, tuple)): minions = set(list(minions)) if timeout is None: timeout = self.opts['timeout'] gather_job_timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) start = int(time.time()) # timeouts per minion, id_ -> timeout time minion_timeouts = {} found = set() missing = set() # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() except Exception as exc: log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG) # Wait for the hosts to check in last_time = False # iterator for this job's return if self.opts['order_masters']: # If we are a MoM, we need to gather expected 
minions from downstreams masters. ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex') else: ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid)) # iterator for the info of this job jinfo_iter = [] # open event jids that need to be un-subscribed from later open_jids = set() timeout_at = time.time() + timeout gather_syndic_wait = time.time() + self.opts['syndic_wait'] # are there still minions running the job out there # start as True so that we ping at least once minions_running = True log.debug( 'get_iter_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) while True: # Process events until timeout is reached or all minions have returned for raw in ret_iter: # if we got None, then there were no events if raw is None: break if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) if 'missing' in raw.get('data', {}): missing.update(raw['data']['missing']) continue if 'return' not in raw['data']: continue if kwargs.get('raw', False): found.add(raw['data']['id']) yield raw else: found.add(raw['data']['id']) ret = {raw['data']['id']: {'ret': raw['data']['return']}} if 'out' in raw['data']: ret[raw['data']['id']]['out'] = raw['data']['out'] if 'retcode' in raw['data']: ret[raw['data']['id']]['retcode'] = raw['data']['retcode'] if 'jid' in raw['data']: ret[raw['data']['id']]['jid'] = raw['data']['jid'] if kwargs.get('_cmd_meta', False): ret[raw['data']['id']].update(raw['data']) log.debug('jid %s return from %s', jid, raw['data']['id']) yield ret # if we have all of the returns (and we aren't a syndic), no need for anything fancy if len(found.intersection(minions)) >= len(minions) and not self.opts['order_masters']: # All minions have returned, break out of the loop log.debug('jid %s found all minions %s', jid, found) break elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']: if len(found) >= len(minions) and 
len(minions) > 0 and time.time() > gather_syndic_wait: # There were some minions to find and we found them # However, this does not imply that *all* masters have yet responded with expected minion lists. # Therefore, continue to wait up to the syndic_wait period (calculated in gather_syndic_wait) to see # if additional lower-level masters deliver their lists of expected # minions. break # If we get here we may not have gathered the minion list yet. Keep waiting # for all lower-level masters to respond with their minion lists # let start the timeouts for all remaining minions for id_ in minions - found: # if we have a new minion in the list, make sure it has a timeout if id_ not in minion_timeouts: minion_timeouts[id_] = time.time() + timeout # if the jinfo has timed out and some minions are still running the job # re-do the ping if time.time() > timeout_at and minions_running: # since this is a new ping, no one has responded yet jinfo = self.gather_job_info(jid, list(minions - found), 'list', **kwargs) minions_running = False # if we weren't assigned any jid that means the master thinks # we have nothing to send if 'jid' not in jinfo: jinfo_iter = [] else: jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid'])) timeout_at = time.time() + gather_job_timeout # if you are a syndic, wait a little longer if self.opts['order_masters']: timeout_at += self.opts.get('syndic_wait', 1) # check for minions that are running the job still for raw in jinfo_iter: # if there are no more events, lets stop waiting for the jinfo if raw is None: break try: if raw['data']['retcode'] > 0: log.error('saltutil returning errors on minion %s', raw['data']['id']) minions.remove(raw['data']['id']) break except KeyError as exc: # This is a safe pass. We're just using the try/except to # avoid having to deep-check for keys. 
missing_key = exc.__str__().strip('\'"') if missing_key == 'retcode': log.debug('retcode missing from client return') else: log.debug( 'Passing on saltutil error. Key \'%s\' missing ' 'from client return. This may be an error in ' 'the client.', missing_key ) # Keep track of the jid events to unsubscribe from later open_jids.add(jinfo['jid']) # TODO: move to a library?? if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue if 'syndic' in raw.get('data', {}): minions.update(raw['syndic']) continue if 'return' not in raw.get('data', {}): continue # if the job isn't running there anymore... don't count if raw['data']['return'] == {}: continue # if the minion throws an exception containing the word "return" # the master will try to handle the string as a dict in the next # step. Check if we have a string, log the issue and continue. if isinstance(raw['data']['return'], six.string_types): log.error("unexpected return from minion: %s", raw) continue if 'return' in raw['data']['return'] and \ raw['data']['return']['return'] == {}: continue # if we didn't originally target the minion, lets add it to the list if raw['data']['id'] not in minions: minions.add(raw['data']['id']) # update this minion's timeout, as long as the job is still running minion_timeouts[raw['data']['id']] = time.time() + timeout # a minion returned, so we know its running somewhere minions_running = True # if we have hit gather_job_timeout (after firing the job) AND # if we have hit all minion timeouts, lets call it now = time.time() # if we have finished waiting, and no minions are running the job # then we need to see if each minion has timedout done = (now > timeout_at) and not minions_running if done: # if all minions have timeod out for id_ in minions - found: if now < minion_timeouts[id_]: done = False break if done: break # don't spin if block: time.sleep(0.01) else: yield # If there are any remaining open events, clean them up. 
if open_jids: for jid in open_jids: self.event.unsubscribe(jid) if expect_minions: for minion in list((minions - found)): yield {minion: {'failed': True}} # Filter out any minions marked as missing for which we received # returns (prevents false events sent due to higher-level masters not # knowing about lower-level minions). missing -= found # Report on missing minions if missing: for minion in missing: yield {minion: {'failed': True}} def get_returns( self, jid, minions, timeout=None): ''' Get the returns for the command line interface via the event system ''' minions = set(minions) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout log.debug( 'get_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Master job cache returner [{0}] failed to verify jid. 
' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: time_left = timeout_at - int(time.time()) wait = max(1, time_left) raw = self.event.get_event(wait, jid, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: found.add(raw['id']) ret[raw['id']] = raw['return'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break if int(time.time()) > timeout_at: log.info( 'jid %s minions %s did not return in time', jid, (minions - found) ) break time.sleep(0.01) return ret def get_full_returns(self, jid, minions, timeout=None): ''' This method starts off a watcher looking at the return data for a specified jid, it returns all of the information for the jid ''' # TODO: change this from ret to return... or the other way. # Its inconsistent, we should pick one ret = {} # create the iterator-- since we want to get anyone in the middle event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Returner {0} could not fetch jid data. 
' 'Exception details: {1}'.format( self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # if we have all the minion returns, lets just return if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise lets use the listener we created above to get the rest for event_ret in event_iter: # if nothing in the event_ret, skip if event_ret == {}: time.sleep(0.02) continue for minion, m_data in six.iteritems(event_ret): if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # are we done yet? if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise we hit the timeout, return what we have return ret def get_cache_returns(self, jid): ''' Execute a single pass to gather the contents of the job cache ''' ret = {} try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Could not examine master job cache. ' 'Error occurred in {0} returner. 
' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data return ret def get_cli_static_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_timeout=False, show_jid=False): ''' Get the returns for the command line interface via the event system ''' log.trace('entered - function get_cli_static_event_returns()') minions = set(minions) if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Load could not be retrieved from ' 'returner {0}. 
Exception details: {1}'.format( self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: # Process events until timeout is reached or all minions have returned time_left = timeout_at - int(time.time()) # Wait 0 == forever, use a minimum of 1s wait = max(1, time_left) jid_tag = 'salt/job/{0}'.format(jid) raw = self.event.get_event(wait, jid_tag, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue found.add(raw['id']) ret[raw['id']] = {'ret': raw['return']} ret[raw['id']]['success'] = raw.get('success', False) if 'out' in raw: ret[raw['id']]['out'] = raw['out'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break if int(time.time()) > timeout_at: if verbose or show_timeout: if self.opts.get('minion_data_cache', False) \ or tgt_type in ('glob', 'pcre', 'list'): if len(found) < len(minions): fail = sorted(list(minions.difference(found))) for minion in fail: ret[minion] = { 'out': 'no_return', 'ret': 'Minion did not return' } break time.sleep(0.01) self._clean_up_subscriptions(jid) return ret def get_cli_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, progress=False, show_timeout=False, show_jid=False, **kwargs): ''' Get the returns for the command line interface via the event system ''' log.trace('func get_cli_event_returns()') if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) # lazy load the connected minions connected_minions = None return_count = 0 for ret in self.get_iter_returns(jid, minions, timeout=timeout, tgt=tgt, tgt_type=tgt_type, # (gtmanfred) 
expect_minions is popped here incase it is passed from a client # call. If this is not popped, then it would be passed twice to # get_iter_returns. expect_minions=(kwargs.pop('expect_minions', False) or verbose or show_timeout), **kwargs ): log.debug('return event: %s', ret) return_count = return_count + 1 if progress: for id_, min_ret in six.iteritems(ret): if not min_ret.get('failed') is True: yield {'minion_count': len(minions), 'return_count': return_count} # replace the return structure for missing minions for id_, min_ret in six.iteritems(ret): if min_ret.get('failed') is True: if connected_minions is None: connected_minions = salt.utils.minions.CkMinions(self.opts).connected_ids() if self.opts['minion_data_cache'] \ and salt.cache.factory(self.opts).contains('minions/{0}'.format(id_), 'data') \ and connected_minions \ and id_ not in connected_minions: yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [Not connected]', 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: # don't report syndics as unresponsive minions if not os.path.exists(os.path.join(self.opts['syndic_dir'], id_)): yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [No response]' '\nThe minions may not have all finished running and any ' 'remaining minions will return upon completion. To look ' 'up the return data for this job later, run the following ' 'command:\n\n' 'salt-run jobs.lookup_jid {0}'.format(jid), 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: yield {id_: min_ret} self._clean_up_subscriptions(jid) def get_event_iter_returns(self, jid, minions, timeout=None): ''' Gather the return data from the event system, break hard when timeout is reached. 
''' log.trace('entered - function get_event_iter_returns()') if timeout is None: timeout = self.opts['timeout'] timeout_at = time.time() + timeout found = set() # Check to see if the jid is real, if not return the empty dict if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() # Wait for the hosts to check in while True: raw = self.event.get_event(timeout, auto_reconnect=self.auto_reconnect) if raw is None or time.time() > timeout_at: # Timeout reached break if 'minions' in raw.get('data', {}): continue try: found.add(raw['id']) ret = {raw['id']: {'ret': raw['return']}} except KeyError: # Ignore other erroneous messages continue if 'out' in raw: ret[raw['id']]['out'] = raw['out'] yield ret time.sleep(0.02) def _prep_pub(self, tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs): ''' Set up the payload_kwargs to be sent down to the master ''' if tgt_type == 'nodegroup': if tgt not in self.opts['nodegroups']: conf_file = self.opts.get( 'conf_file', 'the master config file' ) raise SaltInvocationError( 'Node group {0} unavailable in {1}'.format( tgt, conf_file ) ) tgt = salt.utils.minions.nodegroup_comp(tgt, self.opts['nodegroups']) tgt_type = 'compound' # Convert a range expression to a list of nodes and change expression # form to list if tgt_type == 'range' and HAS_RANGE: tgt = self._convert_range_to_list(tgt) tgt_type = 'list' # If an external job cache is specified add it to the ret list if self.opts.get('ext_job_cache'): if ret: ret += ',{0}'.format(self.opts['ext_job_cache']) else: ret = self.opts['ext_job_cache'] # format the payload - make a function that does this in the payload # module # Generate the standard keyword args to feed to format_payload payload_kwargs = {'cmd': 'publish', 'tgt': tgt, 'fun': fun, 'arg': arg, 'key': self.key, 'tgt_type': tgt_type, 'ret': ret, 'jid': jid} # if kwargs are passed, pack them. 
if kwargs: payload_kwargs['kwargs'] = kwargs # If we have a salt user, add it to the payload if self.opts['syndic_master'] and 'user' in kwargs: payload_kwargs['user'] = kwargs['user'] elif self.salt_user: payload_kwargs['user'] = self.salt_user # If we're a syndication master, pass the timeout if self.opts['order_masters']: payload_kwargs['to'] = timeout return payload_kwargs def pub(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, listen=False, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.ReqChannel.factory(self.opts, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. 
# If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError as err: log.error(err) raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: return payload self.key = key payload_kwargs['key'] = self.key payload = channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: return payload # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() return {'jid': payload['load']['jid'], 'minions': payload['load']['minions']} @tornado.gen.coroutine def pub_async(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, io_loop=None, listen=True, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. 
fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.AsyncReqChannel.factory(self.opts, io_loop=io_loop, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. # If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = yield channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError: raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! 
Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: raise tornado.gen.Return(payload) self.key = key payload_kwargs['key'] = self.key payload = yield channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: raise tornado.gen.Return(payload) # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() raise tornado.gen.Return({'jid': payload['load']['jid'], 'minions': payload['load']['minions']}) def __del__(self): # This IS really necessary! # When running tests, if self.events is not destroyed, we leak 2 # threads per test case which uses self.client if hasattr(self, 'event'): # The call below will take care of calling 'self.event.destroy()' del self.event def _clean_up_subscriptions(self, job_id): if self.opts.get('order_masters'): self.event.unsubscribe('syndic/.*/{0}'.format(job_id), 'regex') self.event.unsubscribe('salt/job/{0}'.format(job_id))
saltstack/salt
salt/client/__init__.py
LocalClient.cmd_batch
python
def cmd_batch( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, batch='10%', **kwargs): ''' Iteratively execute a command on subsets of minions at a time The function signature is the same as :py:meth:`cmd` with the following exceptions. :param batch: The batch identifier of systems to execute on :returns: A generator of minion returns .. code-block:: python >>> returns = local.cmd_batch('*', 'state.highstate', batch='10%') >>> for ret in returns: ... print(ret) {'jerry': {...}} {'dave': {...}} {'stewart': {...}} ''' # Late import - not used anywhere else in this file import salt.cli.batch opts = salt.cli.batch.batch_get_opts( tgt, fun, batch, self.opts, arg=arg, tgt_type=tgt_type, ret=ret, kwarg=kwarg, **kwargs) eauth = salt.cli.batch.batch_get_eauth(kwargs) arg = salt.utils.args.parse_input(arg, kwargs=kwarg) opts = {'tgt': tgt, 'fun': fun, 'arg': arg, 'tgt_type': tgt_type, 'ret': ret, 'batch': batch, 'failhard': kwargs.get('failhard', False), 'raw': kwargs.get('raw', False)} if 'timeout' in kwargs: opts['timeout'] = kwargs['timeout'] if 'gather_job_timeout' in kwargs: opts['gather_job_timeout'] = kwargs['gather_job_timeout'] if 'batch_wait' in kwargs: opts['batch_wait'] = int(kwargs['batch_wait']) eauth = {} if 'eauth' in kwargs: eauth['eauth'] = kwargs.pop('eauth') if 'username' in kwargs: eauth['username'] = kwargs.pop('username') if 'password' in kwargs: eauth['password'] = kwargs.pop('password') if 'token' in kwargs: eauth['token'] = kwargs.pop('token') for key, val in six.iteritems(self.opts): if key not in opts: opts[key] = val batch = salt.cli.batch.Batch(opts, eauth=eauth, quiet=True) for ret in batch.run(): yield ret
Iteratively execute a command on subsets of minions at a time The function signature is the same as :py:meth:`cmd` with the following exceptions. :param batch: The batch identifier of systems to execute on :returns: A generator of minion returns .. code-block:: python >>> returns = local.cmd_batch('*', 'state.highstate', batch='10%') >>> for ret in returns: ... print(ret) {'jerry': {...}} {'dave': {...}} {'stewart': {...}}
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L502-L572
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n", "def parse_input(args, kwargs=None, condition=True, no_parse=None):\n '''\n Parse out the args and kwargs from a list of input values. Optionally,\n return the args and kwargs without passing them to condition_input().\n\n Don't pull args with key=val apart if it has a newline in it.\n '''\n if no_parse is None:\n no_parse = ()\n if kwargs is None:\n kwargs = {}\n _args = []\n _kwargs = {}\n for arg in args:\n if isinstance(arg, six.string_types):\n arg_name, arg_value = parse_kwarg(arg)\n if arg_name:\n _kwargs[arg_name] = yamlify_arg(arg_value) \\\n if arg_name not in no_parse \\\n else arg_value\n else:\n _args.append(yamlify_arg(arg))\n elif isinstance(arg, dict):\n # Yes, we're popping this key off and adding it back if\n # condition_input is called below, but this is the only way to\n # gracefully handle both CLI and API input.\n if arg.pop('__kwarg__', False) is True:\n _kwargs.update(arg)\n else:\n _args.append(arg)\n else:\n _args.append(arg)\n _kwargs.update(kwargs)\n if condition:\n return condition_input(_args, _kwargs)\n return _args, _kwargs\n", "def batch_get_opts(\n tgt,\n fun,\n batch,\n parent_opts,\n arg=(),\n tgt_type='glob',\n ret='',\n kwarg=None,\n **kwargs):\n # We need to re-import salt.utils.args here\n # even though it has already been imported.\n # when cmd_batch is called via the NetAPI\n # the module is unavailable.\n import salt.utils.args\n\n arg = salt.utils.args.condition_input(arg, kwarg)\n opts = {'tgt': tgt,\n 'fun': fun,\n 'arg': arg,\n 'tgt_type': tgt_type,\n 'ret': ret,\n 'batch': batch,\n 'failhard': kwargs.get('failhard', False),\n 'raw': kwargs.get('raw', False)}\n\n if 'timeout' in kwargs:\n opts['timeout'] = kwargs['timeout']\n if 'gather_job_timeout' in kwargs:\n opts['gather_job_timeout'] = kwargs['gather_job_timeout']\n if 'batch_wait' in kwargs:\n opts['batch_wait'] = int(kwargs['batch_wait'])\n\n for key, val in six.iteritems(parent_opts):\n if key not in 
opts:\n opts[key] = val\n\n return opts\n", "def batch_get_eauth(kwargs):\n eauth = {}\n if 'eauth' in kwargs:\n eauth['eauth'] = kwargs.pop('eauth')\n if 'username' in kwargs:\n eauth['username'] = kwargs.pop('username')\n if 'password' in kwargs:\n eauth['password'] = kwargs.pop('password')\n if 'token' in kwargs:\n eauth['token'] = kwargs.pop('token')\n return eauth\n", "def run(self):\n '''\n Execute the batch run\n '''\n args = [[],\n self.opts['fun'],\n self.opts['arg'],\n self.opts['timeout'],\n 'list',\n ]\n bnum = self.get_bnum()\n # No targets to run\n if not self.minions:\n return\n to_run = copy.deepcopy(self.minions)\n active = []\n ret = {}\n iters = []\n # wait the specified time before decide a job is actually done\n bwait = self.opts.get('batch_wait', 0)\n wait = []\n\n if self.options:\n show_jid = self.options.show_jid\n show_verbose = self.options.verbose\n else:\n show_jid = False\n show_verbose = False\n\n # the minion tracker keeps track of responses and iterators\n # - it removes finished iterators from iters[]\n # - if a previously detected minion does not respond, its\n # added with an empty answer to ret{} once the timeout is reached\n # - unresponsive minions are removed from active[] to make\n # sure that the main while loop finishes even with unresp minions\n minion_tracker = {}\n\n if not self.quiet:\n # We already know some minions didn't respond to the ping, so inform\n # the user we won't be attempting to run a job on them\n for down_minion in self.down_minions:\n salt.utils.stringutils.print_cli('Minion {0} did not respond. 
No job will be sent.'.format(down_minion))\n\n # Iterate while we still have things to execute\n while len(ret) < len(self.minions):\n next_ = []\n if bwait and wait:\n self.__update_wait(wait)\n if len(to_run) <= bnum - len(wait) and not active:\n # last bit of them, add them all to next iterator\n while to_run:\n next_.append(to_run.pop())\n else:\n for i in range(bnum - len(active) - len(wait)):\n if to_run:\n minion_id = to_run.pop()\n if isinstance(minion_id, dict):\n next_.append(minion_id.keys()[0])\n else:\n next_.append(minion_id)\n\n active += next_\n args[0] = next_\n\n if next_:\n if not self.quiet:\n salt.utils.stringutils.print_cli('\\nExecuting run on {0}\\n'.format(sorted(next_)))\n # create a new iterator for this batch of minions\n new_iter = self.local.cmd_iter_no_block(\n *args,\n raw=self.opts.get('raw', False),\n ret=self.opts.get('return', ''),\n show_jid=show_jid,\n verbose=show_verbose,\n gather_job_timeout=self.opts['gather_job_timeout'],\n **self.eauth)\n # add it to our iterators and to the minion_tracker\n iters.append(new_iter)\n minion_tracker[new_iter] = {}\n # every iterator added is 'active' and has its set of minions\n minion_tracker[new_iter]['minions'] = next_\n minion_tracker[new_iter]['active'] = True\n\n else:\n time.sleep(0.02)\n parts = {}\n\n # see if we found more minions\n for ping_ret in self.ping_gen:\n if ping_ret is None:\n break\n m = next(six.iterkeys(ping_ret))\n if m not in self.minions:\n self.minions.append(m)\n to_run.append(m)\n\n for queue in iters:\n try:\n # Gather returns until we get to the bottom\n ncnt = 0\n while True:\n part = next(queue)\n if part is None:\n time.sleep(0.01)\n ncnt += 1\n if ncnt > 5:\n break\n continue\n if self.opts.get('raw'):\n parts.update({part['data']['id']: part})\n if part['data']['id'] in minion_tracker[queue]['minions']:\n minion_tracker[queue]['minions'].remove(part['data']['id'])\n else:\n salt.utils.stringutils.print_cli('minion {0} was already deleted from tracker, 
probably a duplicate key'.format(part['id']))\n else:\n parts.update(part)\n for id in part:\n if id in minion_tracker[queue]['minions']:\n minion_tracker[queue]['minions'].remove(id)\n else:\n salt.utils.stringutils.print_cli('minion {0} was already deleted from tracker, probably a duplicate key'.format(id))\n except StopIteration:\n # if a iterator is done:\n # - set it to inactive\n # - add minions that have not responded to parts{}\n\n # check if the tracker contains the iterator\n if queue in minion_tracker:\n minion_tracker[queue]['active'] = False\n\n # add all minions that belong to this iterator and\n # that have not responded to parts{} with an empty response\n for minion in minion_tracker[queue]['minions']:\n if minion not in parts:\n parts[minion] = {}\n parts[minion]['ret'] = {}\n\n for minion, data in six.iteritems(parts):\n if minion in active:\n active.remove(minion)\n if bwait:\n wait.append(datetime.now() + timedelta(seconds=bwait))\n # Munge retcode into return data\n failhard = False\n if 'retcode' in data and isinstance(data['ret'], dict) and 'retcode' not in data['ret']:\n data['ret']['retcode'] = data['retcode']\n if self.opts.get('failhard') and data['ret']['retcode'] > 0:\n failhard = True\n\n if self.opts.get('raw'):\n ret[minion] = data\n yield data\n else:\n ret[minion] = data['ret']\n yield {minion: data['ret']}\n if not self.quiet:\n ret[minion] = data['ret']\n data[minion] = data.pop('ret')\n if 'out' in data:\n out = data.pop('out')\n else:\n out = None\n salt.output.display_output(\n data,\n out,\n self.opts)\n if failhard:\n log.error(\n 'Minion %s returned with non-zero exit code. 
'\n 'Batch run stopped due to failhard', minion\n )\n raise StopIteration\n\n # remove inactive iterators from the iters list\n for queue in minion_tracker:\n # only remove inactive queues\n if not minion_tracker[queue]['active'] and queue in iters:\n iters.remove(queue)\n # also remove the iterator's minions from the active list\n for minion in minion_tracker[queue]['minions']:\n if minion in active:\n active.remove(minion)\n if bwait:\n wait.append(datetime.now() + timedelta(seconds=bwait))\n" ]
class LocalClient(object): ''' The interface used by the :command:`salt` CLI tool on the Salt Master ``LocalClient`` is used to send a command to Salt minions to execute :ref:`execution modules <all-salt.modules>` and return the results to the Salt Master. Importing and using ``LocalClient`` must be done on the same machine as the Salt Master and it must be done using the same user that the Salt Master is running as. (Unless :conf_master:`external_auth` is configured and authentication credentials are included in the execution). .. note:: The LocalClient uses a Tornado IOLoop, this can create issues when using the LocalClient inside an existing IOLoop. If creating the LocalClient in partnership with another IOLoop either create the IOLoop before creating the LocalClient, or when creating the IOLoop use ioloop.current() which will return the ioloop created by LocalClient. .. code-block:: python import salt.client local = salt.client.LocalClient() local.cmd('*', 'test.fib', [10]) ''' def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, 'master'), mopts=None, skip_perm_errors=False, io_loop=None, keep_loop=False, auto_reconnect=False): ''' :param IOLoop io_loop: io_loop used for events. Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. 
''' if mopts: self.opts = mopts else: if os.path.isdir(c_path): log.warning( '%s expects a file path not a directory path(%s) to ' 'its \'c_path\' keyword argument', self.__class__.__name__, c_path ) self.opts = salt.config.client_config(c_path) self.serial = salt.payload.Serial(self.opts) self.salt_user = salt.utils.user.get_specific_user() self.skip_perm_errors = skip_perm_errors self.key = self.__read_master_key() self.auto_reconnect = auto_reconnect self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False, io_loop=io_loop, keep_loop=keep_loop) self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils) self.returners = salt.loader.returners(self.opts, self.functions) def __read_master_key(self): ''' Read in the rotating master authentication key ''' key_user = self.salt_user if key_user == 'root': if self.opts.get('user', 'root') != 'root': key_user = self.opts.get('user', 'root') if key_user.startswith('sudo_'): key_user = self.opts.get('user', 'root') if salt.utils.platform.is_windows(): # The username may contain '\' if it is in Windows # 'DOMAIN\username' format. Fix this for the keyfile path. 
            key_user = key_user.replace('\\', '_')
        # The rotating master key lives under the cachedir, one file per user
        keyfile = os.path.join(self.opts['cachedir'],
                               '.{0}_key'.format(key_user))
        try:
            # Make sure all key parent directories are accessible
            salt.utils.verify.check_path_traversal(self.opts['cachedir'],
                                                   key_user,
                                                   self.skip_perm_errors)
            with salt.utils.files.fopen(keyfile, 'r') as key:
                return salt.utils.stringutils.to_unicode(key.read())
        except (OSError, IOError, SaltClientError):
            # Fall back to eauth
            return ''

    def _convert_range_to_list(self, tgt):
        '''
        convert a seco.range range into a list target
        '''
        range_ = seco.range.Range(self.opts['range_server'])
        try:
            return range_.expand(tgt)
        except seco.range.RangeException as err:
            # Range server failures are reported and treated as "no minions"
            print('Range server exception: {0}'.format(err))
            return []

    def _get_timeout(self, timeout):
        '''
        Return the timeout to use
        '''
        if timeout is None:
            # No explicit timeout requested; use the configured default
            return self.opts['timeout']
        if isinstance(timeout, int):
            return timeout
        if isinstance(timeout, six.string_types):
            # Accept numeric strings from CLI/config input
            try:
                return int(timeout)
            except ValueError:
                return self.opts['timeout']
        # Looks like the timeout is invalid, use config
        return self.opts['timeout']

    def gather_job_info(self, jid, tgt, tgt_type, listen=True, **kwargs):
        '''
        Return the information about a given job
        '''
        log.debug('Checking whether jid %s is still running', jid)
        timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout']))

        # Ask the targeted minions whether they are still running this jid
        pub_data = self.run_job(tgt,
                                'saltutil.find_job',
                                arg=[jid],
                                tgt_type=tgt_type,
                                timeout=timeout,
                                listen=listen,
                                **kwargs
                                )

        if 'jid' in pub_data:
            # Listen for the find_job responses as well
            self.event.subscribe(pub_data['jid'])

        return pub_data

    def _check_pub_data(self, pub_data, listen=True):
        '''
        Common checks on the pub_data data structure returned from running pub
        '''
        if pub_data == '':
            # Failed to authenticate, this could be a bunch of things
            raise EauthAuthenticationError(
                'Failed to authenticate! This is most likely because this '
                'user is not permitted to execute commands, but there is a '
                'small possibility that a disk error occurred (check '
                'disk/inode usage).'
) # Failed to connect to the master and send the pub if 'error' in pub_data: print(pub_data['error']) log.debug('_check_pub_data() error: %s', pub_data['error']) return {} elif 'jid' not in pub_data: return {} if pub_data['jid'] == '0': print('Failed to connect to the Master, ' 'is the Salt Master running?') return {} # If we order masters (via a syndic), don't short circuit if no minions # are found if not self.opts.get('order_masters'): # Check for no minions if not pub_data['minions']: print('No minions matched the target. ' 'No command was sent, no jid was assigned.') return {} # don't install event subscription listeners when the request is asynchronous # and doesn't care. this is important as it will create event leaks otherwise if not listen: return pub_data if self.opts.get('order_masters'): self.event.subscribe('syndic/.*/{0}'.format(pub_data['jid']), 'regex') self.event.subscribe('salt/job/{0}'.format(pub_data['jid'])) return pub_data def run_job( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=False, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = self.pub( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) return self._check_pub_data(pub_data, listen=listen) def gather_minions(self, tgt, expr_form): _res = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form) return _res['minions'] @tornado.gen.coroutine def run_job_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=True, io_loop=None, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job_async('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = yield self.pub_async( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), io_loop=io_loop, listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
            )
        except AuthenticationError as err:
            raise AuthenticationError(err)
        except AuthorizationError as err:
            raise AuthorizationError(err)
        except Exception as general_exception:
            # Convert to generic client error and pass along message
            raise SaltClientError(general_exception)

        raise tornado.gen.Return(self._check_pub_data(pub_data, listen=listen))

    def cmd_async(
            self,
            tgt,
            fun,
            arg=(),
            tgt_type='glob',
            ret='',
            jid='',
            kwarg=None,
            **kwargs):
        '''
        Asynchronously send a command to connected minions

        The function signature is the same as :py:meth:`cmd` with the
        following exceptions.

        :returns: A job ID or 0 on failure.

        .. code-block:: python

            >>> local.cmd_async('*', 'test.sleep', [300])
            '20131219215921857715'
        '''
        # listen=False: fire-and-forget, no event subscription is installed
        pub_data = self.run_job(tgt,
                                fun,
                                arg,
                                tgt_type,
                                ret,
                                jid=jid,
                                kwarg=kwarg,
                                listen=False,
                                **kwargs)
        # run_job returns {} on failure, so 'jid' may be absent
        try:
            return pub_data['jid']
        except KeyError:
            return 0

    def cmd_subset(
            self,
            tgt,
            fun,
            arg=(),
            tgt_type='glob',
            ret='',
            kwarg=None,
            sub=3,
            cli=False,
            progress=False,
            full_return=False,
            **kwargs):
        '''
        Execute a command on a random subset of the targeted systems

        The function signature is the same as :py:meth:`cmd` with the
        following exceptions.

        :param sub: The number of systems to execute on
        :param cli: When this is set to True, a generator is returned,
                    otherwise a dictionary of the minion returns is returned

        ..
code-block:: python >>> SLC.cmd_subset('*', 'test.ping', sub=1) {'jerry': True} ''' minion_ret = self.cmd(tgt, 'sys.list_functions', tgt_type=tgt_type, **kwargs) minions = list(minion_ret) random.shuffle(minions) f_tgt = [] for minion in minions: if fun in minion_ret[minion]: f_tgt.append(minion) if len(f_tgt) >= sub: break func = self.cmd if cli: func = self.cmd_cli return func( f_tgt, fun, arg, tgt_type='list', ret=ret, kwarg=kwarg, progress=progress, full_return=full_return, **kwargs) def cmd(self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', jid='', full_return=False, kwarg=None, **kwargs): ''' Synchronously execute a command on targeted minions The cmd method will execute and wait for the timeout period for all minions to reply, then it will return all minion data at once. .. code-block:: python >>> import salt.client >>> local = salt.client.LocalClient() >>> local.cmd('*', 'cmd.run', ['whoami']) {'jerry': 'root'} With extra keyword arguments for the command function to be run: .. code-block:: python local.cmd('*', 'test.arg', ['arg1', 'arg2'], kwarg={'foo': 'bar'}) Compound commands can be used for multiple executions in a single publish. Function names and function arguments are provided in separate lists but the index values must correlate and an empty list must be used if no arguments are required. .. code-block:: python >>> local.cmd('*', [ 'grains.items', 'sys.doc', 'cmd.run', ], [ [], [], ['uptime'], ]) :param tgt: Which minions to target for the execution. Default is shell glob. Modified by the ``tgt_type`` option. :type tgt: string or list :param fun: The module and function to call on the specified minions of the form ``module.function``. For example ``test.ping`` or ``grains.items``. Compound commands Multiple functions may be called in a single publish by passing a list of commands. This can dramatically lower overhead and speed up the application communicating with Salt. This requires that the ``arg`` param is a list of lists. 
The ``fun`` list and the ``arg`` list must correlate by index meaning a function that does not take arguments must still have a corresponding empty list at the expected index. :type fun: string or list of strings :param arg: A list of arguments to pass to the remote function. If the function takes no arguments ``arg`` may be omitted except when executing a compound command. :type arg: list or list-of-lists :param timeout: Seconds to wait after the last minion returns but before all minions return. :param tgt_type: The type of ``tgt``. Allowed values: * ``glob`` - Bash glob completion - Default * ``pcre`` - Perl style regular expression * ``list`` - Python list of hosts * ``grain`` - Match based on a grain comparison * ``grain_pcre`` - Grain comparison with a regex * ``pillar`` - Pillar data comparison * ``pillar_pcre`` - Pillar data comparison with a regex * ``nodegroup`` - Match on nodegroup * ``range`` - Use a Range server for matching * ``compound`` - Pass a compound match string * ``ipcidr`` - Match based on Subnet (CIDR notation) or IPv4 address. .. versionchanged:: 2017.7.0 Renamed from ``expr_form`` to ``tgt_type`` :param ret: The returner to use. The value passed can be single returner, or a comma delimited list of returners to call in order on the minions :param kwarg: A dictionary with keyword arguments for the function. :param full_return: Output the job return only (default) or the full return including exit code and other job metadata. :param kwargs: Optional keyword arguments. Authentication credentials may be passed when using :conf_master:`external_auth`. For example: ``local.cmd('*', 'test.ping', username='saltdev', password='saltdev', eauth='pam')``. Or: ``local.cmd('*', 'test.ping', token='5871821ea51754fdcea8153c1c745433')`` :returns: A dictionary with the result of the execution, keyed by minion ID. A compound command will return a sub-dictionary keyed by function name. 
''' was_listening = self.event.cpub try: pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, timeout, jid, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data ret = {} for fn_ret in self.get_cli_event_returns( pub_data['jid'], pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, **kwargs): if fn_ret: for mid, data in six.iteritems(fn_ret): ret[mid] = (data if full_return else data.get('ret', {})) for failed in list(set(pub_data['minions']) - set(ret)): ret[failed] = False return ret finally: if not was_listening: self.event.close_pub() def cmd_cli( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, progress=False, **kwargs): ''' Used by the :command:`salt` CLI. This method returns minion returns as they come back and attempts to block until all minions return. The function signature is the same as :py:meth:`cmd` with the following exceptions. :param verbose: Print extra information about the running command :returns: A generator ''' was_listening = self.event.cpub if fun.startswith('state.'): ref = {'compound': '-C', 'glob': '', 'grain': '-G', 'grain_pcre': '-P', 'ipcidr': '-S', 'list': '-L', 'nodegroup': '-N', 'pcre': '-E', 'pillar': '-I', 'pillar_pcre': '-J'} if HAS_RANGE: ref['range'] = '-R' if ref[tgt_type].startswith('-'): self.target_data = "{0} '{1}'".format( ref[tgt_type], ','.join(tgt) if isinstance(tgt, list) else tgt) else: self.target_data = ','.join(tgt) if isinstance(tgt, list) else tgt else: self.target_data = '' try: self.pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not self.pub_data: yield self.pub_data else: try: for fn_ret in self.get_cli_event_returns( self.pub_data['jid'], self.pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, verbose, progress, **kwargs): if not fn_ret: continue yield fn_ret except KeyboardInterrupt: exit_msg = ( '\nExiting gracefully on Ctrl-c' '\n' 'This job\'s jid is: {0}\n' 'The 
minions may not have all finished running and any ' 'remaining minions will return upon completion.\n\n' 'To look up the return data for this job later, run the ' 'following command:\n' 'salt-run jobs.lookup_jid {0}'.format(self.pub_data['jid'])) if self.target_data: exit_msg += ( '\n\n' 'To set up the state run to safely exit, run the following command:\n' 'salt {0} state.soft_kill {1}'.format(self.target_data, self.pub_data['jid'])) raise SystemExit(exit_msg) finally: if not was_listening: self.event.close_pub() def cmd_iter( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, **kwargs): ''' Yields the individual minion returns as they come in The function signature is the same as :py:meth:`cmd` with the following exceptions. Normally :py:meth:`cmd_iter` does not yield results for minions that are not connected. If you want it to return results for disconnected minions set `expect_minions=True` in `kwargs`. :return: A generator yielding the individual minion returns .. code-block:: python >>> ret = local.cmd_iter('*', 'test.ping') >>> for i in ret: ... print(i) {'jerry': {'ret': True}} {'dave': {'ret': True}} {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: if kwargs.get('yield_pub_data'): yield pub_data for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=self._get_timeout(timeout), tgt=tgt, tgt_type=tgt_type, **kwargs): if not fn_ret: continue yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_iter_no_block( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, show_jid=False, verbose=False, **kwargs): ''' Yields the individual minion returns as they come in, or None when no returns are available. 
The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A generator yielding the individual minion returns, or None when no returns are available. This allows for actions to be injected in between minion returns. .. code-block:: python >>> ret = local.cmd_iter_no_block('*', 'test.ping') >>> for i in ret: ... print(i) None {'jerry': {'ret': True}} {'dave': {'ret': True}} None {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=timeout, tgt=tgt, tgt_type=tgt_type, block=False, **kwargs): if fn_ret and any([show_jid, verbose]): for minion in fn_ret: fn_ret[minion]['jid'] = pub_data['jid'] yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_full_return( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, **kwargs): ''' Execute a salt command and return ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data return (self.get_cli_static_event_returns(pub_data['jid'], pub_data['minions'], timeout, tgt, tgt_type, verbose)) finally: if not was_listening: self.event.close_pub() def get_cli_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_jid=False, **kwargs): ''' Starts a watcher looking at the return data for a specified JID :returns: all of the information for the JID ''' if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] fret = {} # make sure the minions is a set (since we do set operations on it) 
minions = set(minions) found = set() # start this before the cache lookup-- in case new stuff comes in event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) # get the info from the cache ret = self.get_cache_returns(jid) if ret != {}: found.update(set(ret)) yield ret # if you have all the returns, stop if len(found.intersection(minions)) >= len(minions): raise StopIteration() # otherwise, get them from the event system for event in event_iter: if event != {}: found.update(set(event)) yield event if len(found.intersection(minions)) >= len(minions): self._clean_up_subscriptions(jid) raise StopIteration() # TODO: tests!! def get_returns_no_block( self, tag, match_type=None): ''' Raw function to just return events of jid excluding timeout logic Yield either the raw event data or None Pass a list of additional regular expressions as `tags_regex` to search the event bus for non-return data, such as minion lists returned from syndics. ''' while True: raw = self.event.get_event(wait=0.01, tag=tag, match_type=match_type, full=True, no_block=True, auto_reconnect=self.auto_reconnect) yield raw def get_iter_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', expect_minions=False, block=True, **kwargs): ''' Watch the event system and return job data as it comes in :returns: all of the information for the JID ''' if not isinstance(minions, set): if isinstance(minions, six.string_types): minions = set([minions]) elif isinstance(minions, (list, tuple)): minions = set(list(minions)) if timeout is None: timeout = self.opts['timeout'] gather_job_timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) start = int(time.time()) # timeouts per minion, id_ -> timeout time minion_timeouts = {} found = set() missing = set() # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the 
iteration, since the jid is invalid raise StopIteration() except Exception as exc: log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG) # Wait for the hosts to check in last_time = False # iterator for this job's return if self.opts['order_masters']: # If we are a MoM, we need to gather expected minions from downstreams masters. ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex') else: ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid)) # iterator for the info of this job jinfo_iter = [] # open event jids that need to be un-subscribed from later open_jids = set() timeout_at = time.time() + timeout gather_syndic_wait = time.time() + self.opts['syndic_wait'] # are there still minions running the job out there # start as True so that we ping at least once minions_running = True log.debug( 'get_iter_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) while True: # Process events until timeout is reached or all minions have returned for raw in ret_iter: # if we got None, then there were no events if raw is None: break if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) if 'missing' in raw.get('data', {}): missing.update(raw['data']['missing']) continue if 'return' not in raw['data']: continue if kwargs.get('raw', False): found.add(raw['data']['id']) yield raw else: found.add(raw['data']['id']) ret = {raw['data']['id']: {'ret': raw['data']['return']}} if 'out' in raw['data']: ret[raw['data']['id']]['out'] = raw['data']['out'] if 'retcode' in raw['data']: ret[raw['data']['id']]['retcode'] = raw['data']['retcode'] if 'jid' in raw['data']: ret[raw['data']['id']]['jid'] = raw['data']['jid'] if kwargs.get('_cmd_meta', False): ret[raw['data']['id']].update(raw['data']) log.debug('jid %s return from %s', jid, raw['data']['id']) yield ret # if we have all of the returns (and we aren't a syndic), no need for anything fancy if 
len(found.intersection(minions)) >= len(minions) and not self.opts['order_masters']: # All minions have returned, break out of the loop log.debug('jid %s found all minions %s', jid, found) break elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']: if len(found) >= len(minions) and len(minions) > 0 and time.time() > gather_syndic_wait: # There were some minions to find and we found them # However, this does not imply that *all* masters have yet responded with expected minion lists. # Therefore, continue to wait up to the syndic_wait period (calculated in gather_syndic_wait) to see # if additional lower-level masters deliver their lists of expected # minions. break # If we get here we may not have gathered the minion list yet. Keep waiting # for all lower-level masters to respond with their minion lists # let start the timeouts for all remaining minions for id_ in minions - found: # if we have a new minion in the list, make sure it has a timeout if id_ not in minion_timeouts: minion_timeouts[id_] = time.time() + timeout # if the jinfo has timed out and some minions are still running the job # re-do the ping if time.time() > timeout_at and minions_running: # since this is a new ping, no one has responded yet jinfo = self.gather_job_info(jid, list(minions - found), 'list', **kwargs) minions_running = False # if we weren't assigned any jid that means the master thinks # we have nothing to send if 'jid' not in jinfo: jinfo_iter = [] else: jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid'])) timeout_at = time.time() + gather_job_timeout # if you are a syndic, wait a little longer if self.opts['order_masters']: timeout_at += self.opts.get('syndic_wait', 1) # check for minions that are running the job still for raw in jinfo_iter: # if there are no more events, lets stop waiting for the jinfo if raw is None: break try: if raw['data']['retcode'] > 0: log.error('saltutil returning errors on minion %s', raw['data']['id']) 
minions.remove(raw['data']['id']) break except KeyError as exc: # This is a safe pass. We're just using the try/except to # avoid having to deep-check for keys. missing_key = exc.__str__().strip('\'"') if missing_key == 'retcode': log.debug('retcode missing from client return') else: log.debug( 'Passing on saltutil error. Key \'%s\' missing ' 'from client return. This may be an error in ' 'the client.', missing_key ) # Keep track of the jid events to unsubscribe from later open_jids.add(jinfo['jid']) # TODO: move to a library?? if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue if 'syndic' in raw.get('data', {}): minions.update(raw['syndic']) continue if 'return' not in raw.get('data', {}): continue # if the job isn't running there anymore... don't count if raw['data']['return'] == {}: continue # if the minion throws an exception containing the word "return" # the master will try to handle the string as a dict in the next # step. Check if we have a string, log the issue and continue. 
if isinstance(raw['data']['return'], six.string_types): log.error("unexpected return from minion: %s", raw) continue if 'return' in raw['data']['return'] and \ raw['data']['return']['return'] == {}: continue # if we didn't originally target the minion, lets add it to the list if raw['data']['id'] not in minions: minions.add(raw['data']['id']) # update this minion's timeout, as long as the job is still running minion_timeouts[raw['data']['id']] = time.time() + timeout # a minion returned, so we know its running somewhere minions_running = True # if we have hit gather_job_timeout (after firing the job) AND # if we have hit all minion timeouts, lets call it now = time.time() # if we have finished waiting, and no minions are running the job # then we need to see if each minion has timedout done = (now > timeout_at) and not minions_running if done: # if all minions have timeod out for id_ in minions - found: if now < minion_timeouts[id_]: done = False break if done: break # don't spin if block: time.sleep(0.01) else: yield # If there are any remaining open events, clean them up. if open_jids: for jid in open_jids: self.event.unsubscribe(jid) if expect_minions: for minion in list((minions - found)): yield {minion: {'failed': True}} # Filter out any minions marked as missing for which we received # returns (prevents false events sent due to higher-level masters not # knowing about lower-level minions). 
missing -= found # Report on missing minions if missing: for minion in missing: yield {minion: {'failed': True}} def get_returns( self, jid, minions, timeout=None): ''' Get the returns for the command line interface via the event system ''' minions = set(minions) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout log.debug( 'get_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Master job cache returner [{0}] failed to verify jid. ' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: time_left = timeout_at - int(time.time()) wait = max(1, time_left) raw = self.event.get_event(wait, jid, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: found.add(raw['id']) ret[raw['id']] = raw['return'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break if int(time.time()) > timeout_at: log.info( 'jid %s minions %s did not return in time', jid, (minions - found) ) break time.sleep(0.01) return ret def get_full_returns(self, jid, minions, timeout=None): ''' This method starts off a watcher looking at the return data for a specified jid, it returns all of the information for the jid ''' # TODO: change this from ret to return... or the other way. 
# Its inconsistent, we should pick one ret = {} # create the iterator-- since we want to get anyone in the middle event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Returner {0} could not fetch jid data. ' 'Exception details: {1}'.format( self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # if we have all the minion returns, lets just return if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise lets use the listener we created above to get the rest for event_ret in event_iter: # if nothing in the event_ret, skip if event_ret == {}: time.sleep(0.02) continue for minion, m_data in six.iteritems(event_ret): if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # are we done yet? if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise we hit the timeout, return what we have return ret def get_cache_returns(self, jid): ''' Execute a single pass to gather the contents of the job cache ''' ret = {} try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Could not examine master job cache. ' 'Error occurred in {0} returner. 
' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data return ret def get_cli_static_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_timeout=False, show_jid=False): ''' Get the returns for the command line interface via the event system ''' log.trace('entered - function get_cli_static_event_returns()') minions = set(minions) if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Load could not be retrieved from ' 'returner {0}. 
Exception details: {1}'.format( self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: # Process events until timeout is reached or all minions have returned time_left = timeout_at - int(time.time()) # Wait 0 == forever, use a minimum of 1s wait = max(1, time_left) jid_tag = 'salt/job/{0}'.format(jid) raw = self.event.get_event(wait, jid_tag, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue found.add(raw['id']) ret[raw['id']] = {'ret': raw['return']} ret[raw['id']]['success'] = raw.get('success', False) if 'out' in raw: ret[raw['id']]['out'] = raw['out'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break if int(time.time()) > timeout_at: if verbose or show_timeout: if self.opts.get('minion_data_cache', False) \ or tgt_type in ('glob', 'pcre', 'list'): if len(found) < len(minions): fail = sorted(list(minions.difference(found))) for minion in fail: ret[minion] = { 'out': 'no_return', 'ret': 'Minion did not return' } break time.sleep(0.01) self._clean_up_subscriptions(jid) return ret def get_cli_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, progress=False, show_timeout=False, show_jid=False, **kwargs): ''' Get the returns for the command line interface via the event system ''' log.trace('func get_cli_event_returns()') if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) # lazy load the connected minions connected_minions = None return_count = 0 for ret in self.get_iter_returns(jid, minions, timeout=timeout, tgt=tgt, tgt_type=tgt_type, # (gtmanfred) 
expect_minions is popped here incase it is passed from a client # call. If this is not popped, then it would be passed twice to # get_iter_returns. expect_minions=(kwargs.pop('expect_minions', False) or verbose or show_timeout), **kwargs ): log.debug('return event: %s', ret) return_count = return_count + 1 if progress: for id_, min_ret in six.iteritems(ret): if not min_ret.get('failed') is True: yield {'minion_count': len(minions), 'return_count': return_count} # replace the return structure for missing minions for id_, min_ret in six.iteritems(ret): if min_ret.get('failed') is True: if connected_minions is None: connected_minions = salt.utils.minions.CkMinions(self.opts).connected_ids() if self.opts['minion_data_cache'] \ and salt.cache.factory(self.opts).contains('minions/{0}'.format(id_), 'data') \ and connected_minions \ and id_ not in connected_minions: yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [Not connected]', 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: # don't report syndics as unresponsive minions if not os.path.exists(os.path.join(self.opts['syndic_dir'], id_)): yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [No response]' '\nThe minions may not have all finished running and any ' 'remaining minions will return upon completion. To look ' 'up the return data for this job later, run the following ' 'command:\n\n' 'salt-run jobs.lookup_jid {0}'.format(jid), 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: yield {id_: min_ret} self._clean_up_subscriptions(jid) def get_event_iter_returns(self, jid, minions, timeout=None): ''' Gather the return data from the event system, break hard when timeout is reached. 
''' log.trace('entered - function get_event_iter_returns()') if timeout is None: timeout = self.opts['timeout'] timeout_at = time.time() + timeout found = set() # Check to see if the jid is real, if not return the empty dict if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() # Wait for the hosts to check in while True: raw = self.event.get_event(timeout, auto_reconnect=self.auto_reconnect) if raw is None or time.time() > timeout_at: # Timeout reached break if 'minions' in raw.get('data', {}): continue try: found.add(raw['id']) ret = {raw['id']: {'ret': raw['return']}} except KeyError: # Ignore other erroneous messages continue if 'out' in raw: ret[raw['id']]['out'] = raw['out'] yield ret time.sleep(0.02) def _prep_pub(self, tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs): ''' Set up the payload_kwargs to be sent down to the master ''' if tgt_type == 'nodegroup': if tgt not in self.opts['nodegroups']: conf_file = self.opts.get( 'conf_file', 'the master config file' ) raise SaltInvocationError( 'Node group {0} unavailable in {1}'.format( tgt, conf_file ) ) tgt = salt.utils.minions.nodegroup_comp(tgt, self.opts['nodegroups']) tgt_type = 'compound' # Convert a range expression to a list of nodes and change expression # form to list if tgt_type == 'range' and HAS_RANGE: tgt = self._convert_range_to_list(tgt) tgt_type = 'list' # If an external job cache is specified add it to the ret list if self.opts.get('ext_job_cache'): if ret: ret += ',{0}'.format(self.opts['ext_job_cache']) else: ret = self.opts['ext_job_cache'] # format the payload - make a function that does this in the payload # module # Generate the standard keyword args to feed to format_payload payload_kwargs = {'cmd': 'publish', 'tgt': tgt, 'fun': fun, 'arg': arg, 'key': self.key, 'tgt_type': tgt_type, 'ret': ret, 'jid': jid} # if kwargs are passed, pack them. 
if kwargs: payload_kwargs['kwargs'] = kwargs # If we have a salt user, add it to the payload if self.opts['syndic_master'] and 'user' in kwargs: payload_kwargs['user'] = kwargs['user'] elif self.salt_user: payload_kwargs['user'] = self.salt_user # If we're a syndication master, pass the timeout if self.opts['order_masters']: payload_kwargs['to'] = timeout return payload_kwargs def pub(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, listen=False, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.ReqChannel.factory(self.opts, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. 
# If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError as err: log.error(err) raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: return payload self.key = key payload_kwargs['key'] = self.key payload = channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: return payload # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() return {'jid': payload['load']['jid'], 'minions': payload['load']['minions']} @tornado.gen.coroutine def pub_async(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, io_loop=None, listen=True, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. 
fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.AsyncReqChannel.factory(self.opts, io_loop=io_loop, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. # If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = yield channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError: raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! 
Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: raise tornado.gen.Return(payload) self.key = key payload_kwargs['key'] = self.key payload = yield channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: raise tornado.gen.Return(payload) # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() raise tornado.gen.Return({'jid': payload['load']['jid'], 'minions': payload['load']['minions']}) def __del__(self): # This IS really necessary! # When running tests, if self.events is not destroyed, we leak 2 # threads per test case which uses self.client if hasattr(self, 'event'): # The call below will take care of calling 'self.event.destroy()' del self.event def _clean_up_subscriptions(self, job_id): if self.opts.get('order_masters'): self.event.unsubscribe('syndic/.*/{0}'.format(job_id), 'regex') self.event.unsubscribe('salt/job/{0}'.format(job_id))
saltstack/salt
salt/client/__init__.py
LocalClient.cmd
python
def cmd(self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', jid='', full_return=False, kwarg=None, **kwargs): ''' Synchronously execute a command on targeted minions The cmd method will execute and wait for the timeout period for all minions to reply, then it will return all minion data at once. .. code-block:: python >>> import salt.client >>> local = salt.client.LocalClient() >>> local.cmd('*', 'cmd.run', ['whoami']) {'jerry': 'root'} With extra keyword arguments for the command function to be run: .. code-block:: python local.cmd('*', 'test.arg', ['arg1', 'arg2'], kwarg={'foo': 'bar'}) Compound commands can be used for multiple executions in a single publish. Function names and function arguments are provided in separate lists but the index values must correlate and an empty list must be used if no arguments are required. .. code-block:: python >>> local.cmd('*', [ 'grains.items', 'sys.doc', 'cmd.run', ], [ [], [], ['uptime'], ]) :param tgt: Which minions to target for the execution. Default is shell glob. Modified by the ``tgt_type`` option. :type tgt: string or list :param fun: The module and function to call on the specified minions of the form ``module.function``. For example ``test.ping`` or ``grains.items``. Compound commands Multiple functions may be called in a single publish by passing a list of commands. This can dramatically lower overhead and speed up the application communicating with Salt. This requires that the ``arg`` param is a list of lists. The ``fun`` list and the ``arg`` list must correlate by index meaning a function that does not take arguments must still have a corresponding empty list at the expected index. :type fun: string or list of strings :param arg: A list of arguments to pass to the remote function. If the function takes no arguments ``arg`` may be omitted except when executing a compound command. 
:type arg: list or list-of-lists :param timeout: Seconds to wait after the last minion returns but before all minions return. :param tgt_type: The type of ``tgt``. Allowed values: * ``glob`` - Bash glob completion - Default * ``pcre`` - Perl style regular expression * ``list`` - Python list of hosts * ``grain`` - Match based on a grain comparison * ``grain_pcre`` - Grain comparison with a regex * ``pillar`` - Pillar data comparison * ``pillar_pcre`` - Pillar data comparison with a regex * ``nodegroup`` - Match on nodegroup * ``range`` - Use a Range server for matching * ``compound`` - Pass a compound match string * ``ipcidr`` - Match based on Subnet (CIDR notation) or IPv4 address. .. versionchanged:: 2017.7.0 Renamed from ``expr_form`` to ``tgt_type`` :param ret: The returner to use. The value passed can be single returner, or a comma delimited list of returners to call in order on the minions :param kwarg: A dictionary with keyword arguments for the function. :param full_return: Output the job return only (default) or the full return including exit code and other job metadata. :param kwargs: Optional keyword arguments. Authentication credentials may be passed when using :conf_master:`external_auth`. For example: ``local.cmd('*', 'test.ping', username='saltdev', password='saltdev', eauth='pam')``. Or: ``local.cmd('*', 'test.ping', token='5871821ea51754fdcea8153c1c745433')`` :returns: A dictionary with the result of the execution, keyed by minion ID. A compound command will return a sub-dictionary keyed by function name. 
''' was_listening = self.event.cpub try: pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, timeout, jid, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data ret = {} for fn_ret in self.get_cli_event_returns( pub_data['jid'], pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, **kwargs): if fn_ret: for mid, data in six.iteritems(fn_ret): ret[mid] = (data if full_return else data.get('ret', {})) for failed in list(set(pub_data['minions']) - set(ret)): ret[failed] = False return ret finally: if not was_listening: self.event.close_pub()
Synchronously execute a command on targeted minions The cmd method will execute and wait for the timeout period for all minions to reply, then it will return all minion data at once. .. code-block:: python >>> import salt.client >>> local = salt.client.LocalClient() >>> local.cmd('*', 'cmd.run', ['whoami']) {'jerry': 'root'} With extra keyword arguments for the command function to be run: .. code-block:: python local.cmd('*', 'test.arg', ['arg1', 'arg2'], kwarg={'foo': 'bar'}) Compound commands can be used for multiple executions in a single publish. Function names and function arguments are provided in separate lists but the index values must correlate and an empty list must be used if no arguments are required. .. code-block:: python >>> local.cmd('*', [ 'grains.items', 'sys.doc', 'cmd.run', ], [ [], [], ['uptime'], ]) :param tgt: Which minions to target for the execution. Default is shell glob. Modified by the ``tgt_type`` option. :type tgt: string or list :param fun: The module and function to call on the specified minions of the form ``module.function``. For example ``test.ping`` or ``grains.items``. Compound commands Multiple functions may be called in a single publish by passing a list of commands. This can dramatically lower overhead and speed up the application communicating with Salt. This requires that the ``arg`` param is a list of lists. The ``fun`` list and the ``arg`` list must correlate by index meaning a function that does not take arguments must still have a corresponding empty list at the expected index. :type fun: string or list of strings :param arg: A list of arguments to pass to the remote function. If the function takes no arguments ``arg`` may be omitted except when executing a compound command. :type arg: list or list-of-lists :param timeout: Seconds to wait after the last minion returns but before all minions return. :param tgt_type: The type of ``tgt``. 
Allowed values: * ``glob`` - Bash glob completion - Default * ``pcre`` - Perl style regular expression * ``list`` - Python list of hosts * ``grain`` - Match based on a grain comparison * ``grain_pcre`` - Grain comparison with a regex * ``pillar`` - Pillar data comparison * ``pillar_pcre`` - Pillar data comparison with a regex * ``nodegroup`` - Match on nodegroup * ``range`` - Use a Range server for matching * ``compound`` - Pass a compound match string * ``ipcidr`` - Match based on Subnet (CIDR notation) or IPv4 address. .. versionchanged:: 2017.7.0 Renamed from ``expr_form`` to ``tgt_type`` :param ret: The returner to use. The value passed can be single returner, or a comma delimited list of returners to call in order on the minions :param kwarg: A dictionary with keyword arguments for the function. :param full_return: Output the job return only (default) or the full return including exit code and other job metadata. :param kwargs: Optional keyword arguments. Authentication credentials may be passed when using :conf_master:`external_auth`. For example: ``local.cmd('*', 'test.ping', username='saltdev', password='saltdev', eauth='pam')``. Or: ``local.cmd('*', 'test.ping', token='5871821ea51754fdcea8153c1c745433')`` :returns: A dictionary with the result of the execution, keyed by minion ID. A compound command will return a sub-dictionary keyed by function name.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L574-L724
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n", "def _get_timeout(self, timeout):\n '''\n Return the timeout to use\n '''\n if timeout is None:\n return self.opts['timeout']\n if isinstance(timeout, int):\n return timeout\n if isinstance(timeout, six.string_types):\n try:\n return int(timeout)\n except ValueError:\n return self.opts['timeout']\n # Looks like the timeout is invalid, use config\n return self.opts['timeout']\n", "def run_job(\n self,\n tgt,\n fun,\n arg=(),\n tgt_type='glob',\n ret='',\n timeout=None,\n jid='',\n kwarg=None,\n listen=False,\n **kwargs):\n '''\n Asynchronously send a command to connected minions\n\n Prep the job directory and publish a command to any targeted minions.\n\n :return: A dictionary of (validated) ``pub_data`` or an empty\n dictionary on failure. The ``pub_data`` contains the job ID and a\n list of all minions that are expected to return data.\n\n .. code-block:: python\n\n >>> local.run_job('*', 'test.sleep', [300])\n {'jid': '20131219215650131543', 'minions': ['jerry']}\n '''\n arg = salt.utils.args.parse_input(arg, kwargs=kwarg)\n\n try:\n pub_data = self.pub(\n tgt,\n fun,\n arg,\n tgt_type,\n ret,\n jid=jid,\n timeout=self._get_timeout(timeout),\n listen=listen,\n **kwargs)\n except SaltClientError:\n # Re-raise error with specific message\n raise SaltClientError(\n 'The salt master could not be contacted. 
Is master running?'\n )\n except AuthenticationError as err:\n raise AuthenticationError(err)\n except AuthorizationError as err:\n raise AuthorizationError(err)\n except Exception as general_exception:\n # Convert to generic client error and pass along message\n raise SaltClientError(general_exception)\n\n return self._check_pub_data(pub_data, listen=listen)\n", "def get_cli_event_returns(\n self,\n jid,\n minions,\n timeout=None,\n tgt='*',\n tgt_type='glob',\n verbose=False,\n progress=False,\n show_timeout=False,\n show_jid=False,\n **kwargs):\n '''\n Get the returns for the command line interface via the event system\n '''\n log.trace('func get_cli_event_returns()')\n\n if verbose:\n msg = 'Executing job with jid {0}'.format(jid)\n print(msg)\n print('-' * len(msg) + '\\n')\n elif show_jid:\n print('jid: {0}'.format(jid))\n\n # lazy load the connected minions\n connected_minions = None\n return_count = 0\n\n for ret in self.get_iter_returns(jid,\n minions,\n timeout=timeout,\n tgt=tgt,\n tgt_type=tgt_type,\n # (gtmanfred) expect_minions is popped here incase it is passed from a client\n # call. 
If this is not popped, then it would be passed twice to\n # get_iter_returns.\n expect_minions=(kwargs.pop('expect_minions', False) or verbose or show_timeout),\n **kwargs\n ):\n log.debug('return event: %s', ret)\n return_count = return_count + 1\n if progress:\n for id_, min_ret in six.iteritems(ret):\n if not min_ret.get('failed') is True:\n yield {'minion_count': len(minions), 'return_count': return_count}\n # replace the return structure for missing minions\n for id_, min_ret in six.iteritems(ret):\n if min_ret.get('failed') is True:\n if connected_minions is None:\n connected_minions = salt.utils.minions.CkMinions(self.opts).connected_ids()\n if self.opts['minion_data_cache'] \\\n and salt.cache.factory(self.opts).contains('minions/{0}'.format(id_), 'data') \\\n and connected_minions \\\n and id_ not in connected_minions:\n\n yield {\n id_: {\n 'out': 'no_return',\n 'ret': 'Minion did not return. [Not connected]',\n 'retcode': salt.defaults.exitcodes.EX_GENERIC\n }\n }\n else:\n # don't report syndics as unresponsive minions\n if not os.path.exists(os.path.join(self.opts['syndic_dir'], id_)):\n yield {\n id_: {\n 'out': 'no_return',\n 'ret': 'Minion did not return. [No response]'\n '\\nThe minions may not have all finished running and any '\n 'remaining minions will return upon completion. To look '\n 'up the return data for this job later, run the following '\n 'command:\\n\\n'\n 'salt-run jobs.lookup_jid {0}'.format(jid),\n 'retcode': salt.defaults.exitcodes.EX_GENERIC\n }\n }\n else:\n yield {id_: min_ret}\n\n self._clean_up_subscriptions(jid)\n" ]
class LocalClient(object): ''' The interface used by the :command:`salt` CLI tool on the Salt Master ``LocalClient`` is used to send a command to Salt minions to execute :ref:`execution modules <all-salt.modules>` and return the results to the Salt Master. Importing and using ``LocalClient`` must be done on the same machine as the Salt Master and it must be done using the same user that the Salt Master is running as. (Unless :conf_master:`external_auth` is configured and authentication credentials are included in the execution). .. note:: The LocalClient uses a Tornado IOLoop, this can create issues when using the LocalClient inside an existing IOLoop. If creating the LocalClient in partnership with another IOLoop either create the IOLoop before creating the LocalClient, or when creating the IOLoop use ioloop.current() which will return the ioloop created by LocalClient. .. code-block:: python import salt.client local = salt.client.LocalClient() local.cmd('*', 'test.fib', [10]) ''' def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, 'master'), mopts=None, skip_perm_errors=False, io_loop=None, keep_loop=False, auto_reconnect=False): ''' :param IOLoop io_loop: io_loop used for events. Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. 
''' if mopts: self.opts = mopts else: if os.path.isdir(c_path): log.warning( '%s expects a file path not a directory path(%s) to ' 'its \'c_path\' keyword argument', self.__class__.__name__, c_path ) self.opts = salt.config.client_config(c_path) self.serial = salt.payload.Serial(self.opts) self.salt_user = salt.utils.user.get_specific_user() self.skip_perm_errors = skip_perm_errors self.key = self.__read_master_key() self.auto_reconnect = auto_reconnect self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False, io_loop=io_loop, keep_loop=keep_loop) self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils) self.returners = salt.loader.returners(self.opts, self.functions) def __read_master_key(self): ''' Read in the rotating master authentication key ''' key_user = self.salt_user if key_user == 'root': if self.opts.get('user', 'root') != 'root': key_user = self.opts.get('user', 'root') if key_user.startswith('sudo_'): key_user = self.opts.get('user', 'root') if salt.utils.platform.is_windows(): # The username may contain '\' if it is in Windows # 'DOMAIN\username' format. Fix this for the keyfile path. 
key_user = key_user.replace('\\', '_') keyfile = os.path.join(self.opts['cachedir'], '.{0}_key'.format(key_user)) try: # Make sure all key parent directories are accessible salt.utils.verify.check_path_traversal(self.opts['cachedir'], key_user, self.skip_perm_errors) with salt.utils.files.fopen(keyfile, 'r') as key: return salt.utils.stringutils.to_unicode(key.read()) except (OSError, IOError, SaltClientError): # Fall back to eauth return '' def _convert_range_to_list(self, tgt): ''' convert a seco.range range into a list target ''' range_ = seco.range.Range(self.opts['range_server']) try: return range_.expand(tgt) except seco.range.RangeException as err: print('Range server exception: {0}'.format(err)) return [] def _get_timeout(self, timeout): ''' Return the timeout to use ''' if timeout is None: return self.opts['timeout'] if isinstance(timeout, int): return timeout if isinstance(timeout, six.string_types): try: return int(timeout) except ValueError: return self.opts['timeout'] # Looks like the timeout is invalid, use config return self.opts['timeout'] def gather_job_info(self, jid, tgt, tgt_type, listen=True, **kwargs): ''' Return the information about a given job ''' log.debug('Checking whether jid %s is still running', jid) timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) pub_data = self.run_job(tgt, 'saltutil.find_job', arg=[jid], tgt_type=tgt_type, timeout=timeout, listen=listen, **kwargs ) if 'jid' in pub_data: self.event.subscribe(pub_data['jid']) return pub_data def _check_pub_data(self, pub_data, listen=True): ''' Common checks on the pub_data data structure returned from running pub ''' if pub_data == '': # Failed to authenticate, this could be a bunch of things raise EauthAuthenticationError( 'Failed to authenticate! This is most likely because this ' 'user is not permitted to execute commands, but there is a ' 'small possibility that a disk error occurred (check ' 'disk/inode usage).' 
) # Failed to connect to the master and send the pub if 'error' in pub_data: print(pub_data['error']) log.debug('_check_pub_data() error: %s', pub_data['error']) return {} elif 'jid' not in pub_data: return {} if pub_data['jid'] == '0': print('Failed to connect to the Master, ' 'is the Salt Master running?') return {} # If we order masters (via a syndic), don't short circuit if no minions # are found if not self.opts.get('order_masters'): # Check for no minions if not pub_data['minions']: print('No minions matched the target. ' 'No command was sent, no jid was assigned.') return {} # don't install event subscription listeners when the request is asynchronous # and doesn't care. this is important as it will create event leaks otherwise if not listen: return pub_data if self.opts.get('order_masters'): self.event.subscribe('syndic/.*/{0}'.format(pub_data['jid']), 'regex') self.event.subscribe('salt/job/{0}'.format(pub_data['jid'])) return pub_data def run_job( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=False, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = self.pub( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) return self._check_pub_data(pub_data, listen=listen) def gather_minions(self, tgt, expr_form): _res = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form) return _res['minions'] @tornado.gen.coroutine def run_job_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=True, io_loop=None, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job_async('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = yield self.pub_async( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), io_loop=io_loop, listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) raise tornado.gen.Return(self._check_pub_data(pub_data, listen=listen)) def cmd_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', kwarg=None, **kwargs): ''' Asynchronously send a command to connected minions The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A job ID or 0 on failure. .. code-block:: python >>> local.cmd_async('*', 'test.sleep', [300]) '20131219215921857715' ''' pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, jid=jid, kwarg=kwarg, listen=False, **kwargs) try: return pub_data['jid'] except KeyError: return 0 def cmd_subset( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, sub=3, cli=False, progress=False, full_return=False, **kwargs): ''' Execute a command on a random subset of the targeted systems The function signature is the same as :py:meth:`cmd` with the following exceptions. :param sub: The number of systems to execute on :param cli: When this is set to True, a generator is returned, otherwise a dictionary of the minion returns is returned .. 
code-block:: python >>> SLC.cmd_subset('*', 'test.ping', sub=1) {'jerry': True} ''' minion_ret = self.cmd(tgt, 'sys.list_functions', tgt_type=tgt_type, **kwargs) minions = list(minion_ret) random.shuffle(minions) f_tgt = [] for minion in minions: if fun in minion_ret[minion]: f_tgt.append(minion) if len(f_tgt) >= sub: break func = self.cmd if cli: func = self.cmd_cli return func( f_tgt, fun, arg, tgt_type='list', ret=ret, kwarg=kwarg, progress=progress, full_return=full_return, **kwargs) def cmd_batch( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, batch='10%', **kwargs): ''' Iteratively execute a command on subsets of minions at a time The function signature is the same as :py:meth:`cmd` with the following exceptions. :param batch: The batch identifier of systems to execute on :returns: A generator of minion returns .. code-block:: python >>> returns = local.cmd_batch('*', 'state.highstate', batch='10%') >>> for ret in returns: ... print(ret) {'jerry': {...}} {'dave': {...}} {'stewart': {...}} ''' # Late import - not used anywhere else in this file import salt.cli.batch opts = salt.cli.batch.batch_get_opts( tgt, fun, batch, self.opts, arg=arg, tgt_type=tgt_type, ret=ret, kwarg=kwarg, **kwargs) eauth = salt.cli.batch.batch_get_eauth(kwargs) arg = salt.utils.args.parse_input(arg, kwargs=kwarg) opts = {'tgt': tgt, 'fun': fun, 'arg': arg, 'tgt_type': tgt_type, 'ret': ret, 'batch': batch, 'failhard': kwargs.get('failhard', False), 'raw': kwargs.get('raw', False)} if 'timeout' in kwargs: opts['timeout'] = kwargs['timeout'] if 'gather_job_timeout' in kwargs: opts['gather_job_timeout'] = kwargs['gather_job_timeout'] if 'batch_wait' in kwargs: opts['batch_wait'] = int(kwargs['batch_wait']) eauth = {} if 'eauth' in kwargs: eauth['eauth'] = kwargs.pop('eauth') if 'username' in kwargs: eauth['username'] = kwargs.pop('username') if 'password' in kwargs: eauth['password'] = kwargs.pop('password') if 'token' in kwargs: eauth['token'] = kwargs.pop('token') for key, 
val in six.iteritems(self.opts): if key not in opts: opts[key] = val batch = salt.cli.batch.Batch(opts, eauth=eauth, quiet=True) for ret in batch.run(): yield ret def cmd_cli( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, progress=False, **kwargs): ''' Used by the :command:`salt` CLI. This method returns minion returns as they come back and attempts to block until all minions return. The function signature is the same as :py:meth:`cmd` with the following exceptions. :param verbose: Print extra information about the running command :returns: A generator ''' was_listening = self.event.cpub if fun.startswith('state.'): ref = {'compound': '-C', 'glob': '', 'grain': '-G', 'grain_pcre': '-P', 'ipcidr': '-S', 'list': '-L', 'nodegroup': '-N', 'pcre': '-E', 'pillar': '-I', 'pillar_pcre': '-J'} if HAS_RANGE: ref['range'] = '-R' if ref[tgt_type].startswith('-'): self.target_data = "{0} '{1}'".format( ref[tgt_type], ','.join(tgt) if isinstance(tgt, list) else tgt) else: self.target_data = ','.join(tgt) if isinstance(tgt, list) else tgt else: self.target_data = '' try: self.pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not self.pub_data: yield self.pub_data else: try: for fn_ret in self.get_cli_event_returns( self.pub_data['jid'], self.pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, verbose, progress, **kwargs): if not fn_ret: continue yield fn_ret except KeyboardInterrupt: exit_msg = ( '\nExiting gracefully on Ctrl-c' '\n' 'This job\'s jid is: {0}\n' 'The minions may not have all finished running and any ' 'remaining minions will return upon completion.\n\n' 'To look up the return data for this job later, run the ' 'following command:\n' 'salt-run jobs.lookup_jid {0}'.format(self.pub_data['jid'])) if self.target_data: exit_msg += ( '\n\n' 'To set up the state run to safely exit, run the following command:\n' 'salt {0} state.soft_kill 
{1}'.format(self.target_data, self.pub_data['jid'])) raise SystemExit(exit_msg) finally: if not was_listening: self.event.close_pub() def cmd_iter( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, **kwargs): ''' Yields the individual minion returns as they come in The function signature is the same as :py:meth:`cmd` with the following exceptions. Normally :py:meth:`cmd_iter` does not yield results for minions that are not connected. If you want it to return results for disconnected minions set `expect_minions=True` in `kwargs`. :return: A generator yielding the individual minion returns .. code-block:: python >>> ret = local.cmd_iter('*', 'test.ping') >>> for i in ret: ... print(i) {'jerry': {'ret': True}} {'dave': {'ret': True}} {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: if kwargs.get('yield_pub_data'): yield pub_data for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=self._get_timeout(timeout), tgt=tgt, tgt_type=tgt_type, **kwargs): if not fn_ret: continue yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_iter_no_block( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, show_jid=False, verbose=False, **kwargs): ''' Yields the individual minion returns as they come in, or None when no returns are available. The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A generator yielding the individual minion returns, or None when no returns are available. This allows for actions to be injected in between minion returns. .. code-block:: python >>> ret = local.cmd_iter_no_block('*', 'test.ping') >>> for i in ret: ... 
print(i) None {'jerry': {'ret': True}} {'dave': {'ret': True}} None {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=timeout, tgt=tgt, tgt_type=tgt_type, block=False, **kwargs): if fn_ret and any([show_jid, verbose]): for minion in fn_ret: fn_ret[minion]['jid'] = pub_data['jid'] yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_full_return( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, **kwargs): ''' Execute a salt command and return ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data return (self.get_cli_static_event_returns(pub_data['jid'], pub_data['minions'], timeout, tgt, tgt_type, verbose)) finally: if not was_listening: self.event.close_pub() def get_cli_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_jid=False, **kwargs): ''' Starts a watcher looking at the return data for a specified JID :returns: all of the information for the JID ''' if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] fret = {} # make sure the minions is a set (since we do set operations on it) minions = set(minions) found = set() # start this before the cache lookup-- in case new stuff comes in event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) # get the info from the cache ret = self.get_cache_returns(jid) if ret != {}: found.update(set(ret)) yield ret # if you have all the returns, stop if 
len(found.intersection(minions)) >= len(minions): raise StopIteration() # otherwise, get them from the event system for event in event_iter: if event != {}: found.update(set(event)) yield event if len(found.intersection(minions)) >= len(minions): self._clean_up_subscriptions(jid) raise StopIteration() # TODO: tests!! def get_returns_no_block( self, tag, match_type=None): ''' Raw function to just return events of jid excluding timeout logic Yield either the raw event data or None Pass a list of additional regular expressions as `tags_regex` to search the event bus for non-return data, such as minion lists returned from syndics. ''' while True: raw = self.event.get_event(wait=0.01, tag=tag, match_type=match_type, full=True, no_block=True, auto_reconnect=self.auto_reconnect) yield raw def get_iter_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', expect_minions=False, block=True, **kwargs): ''' Watch the event system and return job data as it comes in :returns: all of the information for the JID ''' if not isinstance(minions, set): if isinstance(minions, six.string_types): minions = set([minions]) elif isinstance(minions, (list, tuple)): minions = set(list(minions)) if timeout is None: timeout = self.opts['timeout'] gather_job_timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) start = int(time.time()) # timeouts per minion, id_ -> timeout time minion_timeouts = {} found = set() missing = set() # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() except Exception as exc: log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG) # Wait for the hosts to check in last_time = False # iterator for this job's return if self.opts['order_masters']: # If we are a MoM, we need to gather expected 
minions from downstreams masters. ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex') else: ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid)) # iterator for the info of this job jinfo_iter = [] # open event jids that need to be un-subscribed from later open_jids = set() timeout_at = time.time() + timeout gather_syndic_wait = time.time() + self.opts['syndic_wait'] # are there still minions running the job out there # start as True so that we ping at least once minions_running = True log.debug( 'get_iter_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) while True: # Process events until timeout is reached or all minions have returned for raw in ret_iter: # if we got None, then there were no events if raw is None: break if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) if 'missing' in raw.get('data', {}): missing.update(raw['data']['missing']) continue if 'return' not in raw['data']: continue if kwargs.get('raw', False): found.add(raw['data']['id']) yield raw else: found.add(raw['data']['id']) ret = {raw['data']['id']: {'ret': raw['data']['return']}} if 'out' in raw['data']: ret[raw['data']['id']]['out'] = raw['data']['out'] if 'retcode' in raw['data']: ret[raw['data']['id']]['retcode'] = raw['data']['retcode'] if 'jid' in raw['data']: ret[raw['data']['id']]['jid'] = raw['data']['jid'] if kwargs.get('_cmd_meta', False): ret[raw['data']['id']].update(raw['data']) log.debug('jid %s return from %s', jid, raw['data']['id']) yield ret # if we have all of the returns (and we aren't a syndic), no need for anything fancy if len(found.intersection(minions)) >= len(minions) and not self.opts['order_masters']: # All minions have returned, break out of the loop log.debug('jid %s found all minions %s', jid, found) break elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']: if len(found) >= len(minions) and 
len(minions) > 0 and time.time() > gather_syndic_wait: # There were some minions to find and we found them # However, this does not imply that *all* masters have yet responded with expected minion lists. # Therefore, continue to wait up to the syndic_wait period (calculated in gather_syndic_wait) to see # if additional lower-level masters deliver their lists of expected # minions. break # If we get here we may not have gathered the minion list yet. Keep waiting # for all lower-level masters to respond with their minion lists # let start the timeouts for all remaining minions for id_ in minions - found: # if we have a new minion in the list, make sure it has a timeout if id_ not in minion_timeouts: minion_timeouts[id_] = time.time() + timeout # if the jinfo has timed out and some minions are still running the job # re-do the ping if time.time() > timeout_at and minions_running: # since this is a new ping, no one has responded yet jinfo = self.gather_job_info(jid, list(minions - found), 'list', **kwargs) minions_running = False # if we weren't assigned any jid that means the master thinks # we have nothing to send if 'jid' not in jinfo: jinfo_iter = [] else: jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid'])) timeout_at = time.time() + gather_job_timeout # if you are a syndic, wait a little longer if self.opts['order_masters']: timeout_at += self.opts.get('syndic_wait', 1) # check for minions that are running the job still for raw in jinfo_iter: # if there are no more events, lets stop waiting for the jinfo if raw is None: break try: if raw['data']['retcode'] > 0: log.error('saltutil returning errors on minion %s', raw['data']['id']) minions.remove(raw['data']['id']) break except KeyError as exc: # This is a safe pass. We're just using the try/except to # avoid having to deep-check for keys. 
missing_key = exc.__str__().strip('\'"') if missing_key == 'retcode': log.debug('retcode missing from client return') else: log.debug( 'Passing on saltutil error. Key \'%s\' missing ' 'from client return. This may be an error in ' 'the client.', missing_key ) # Keep track of the jid events to unsubscribe from later open_jids.add(jinfo['jid']) # TODO: move to a library?? if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue if 'syndic' in raw.get('data', {}): minions.update(raw['syndic']) continue if 'return' not in raw.get('data', {}): continue # if the job isn't running there anymore... don't count if raw['data']['return'] == {}: continue # if the minion throws an exception containing the word "return" # the master will try to handle the string as a dict in the next # step. Check if we have a string, log the issue and continue. if isinstance(raw['data']['return'], six.string_types): log.error("unexpected return from minion: %s", raw) continue if 'return' in raw['data']['return'] and \ raw['data']['return']['return'] == {}: continue # if we didn't originally target the minion, lets add it to the list if raw['data']['id'] not in minions: minions.add(raw['data']['id']) # update this minion's timeout, as long as the job is still running minion_timeouts[raw['data']['id']] = time.time() + timeout # a minion returned, so we know its running somewhere minions_running = True # if we have hit gather_job_timeout (after firing the job) AND # if we have hit all minion timeouts, lets call it now = time.time() # if we have finished waiting, and no minions are running the job # then we need to see if each minion has timedout done = (now > timeout_at) and not minions_running if done: # if all minions have timeod out for id_ in minions - found: if now < minion_timeouts[id_]: done = False break if done: break # don't spin if block: time.sleep(0.01) else: yield # If there are any remaining open events, clean them up. 
if open_jids: for jid in open_jids: self.event.unsubscribe(jid) if expect_minions: for minion in list((minions - found)): yield {minion: {'failed': True}} # Filter out any minions marked as missing for which we received # returns (prevents false events sent due to higher-level masters not # knowing about lower-level minions). missing -= found # Report on missing minions if missing: for minion in missing: yield {minion: {'failed': True}} def get_returns( self, jid, minions, timeout=None): ''' Get the returns for the command line interface via the event system ''' minions = set(minions) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout log.debug( 'get_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Master job cache returner [{0}] failed to verify jid. 
' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: time_left = timeout_at - int(time.time()) wait = max(1, time_left) raw = self.event.get_event(wait, jid, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: found.add(raw['id']) ret[raw['id']] = raw['return'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break if int(time.time()) > timeout_at: log.info( 'jid %s minions %s did not return in time', jid, (minions - found) ) break time.sleep(0.01) return ret def get_full_returns(self, jid, minions, timeout=None): ''' This method starts off a watcher looking at the return data for a specified jid, it returns all of the information for the jid ''' # TODO: change this from ret to return... or the other way. # Its inconsistent, we should pick one ret = {} # create the iterator-- since we want to get anyone in the middle event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Returner {0} could not fetch jid data. 
' 'Exception details: {1}'.format( self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # if we have all the minion returns, lets just return if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise lets use the listener we created above to get the rest for event_ret in event_iter: # if nothing in the event_ret, skip if event_ret == {}: time.sleep(0.02) continue for minion, m_data in six.iteritems(event_ret): if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # are we done yet? if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise we hit the timeout, return what we have return ret def get_cache_returns(self, jid): ''' Execute a single pass to gather the contents of the job cache ''' ret = {} try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Could not examine master job cache. ' 'Error occurred in {0} returner. 
' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data return ret def get_cli_static_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_timeout=False, show_jid=False): ''' Get the returns for the command line interface via the event system ''' log.trace('entered - function get_cli_static_event_returns()') minions = set(minions) if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Load could not be retrieved from ' 'returner {0}. 
Exception details: {1}'.format( self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: # Process events until timeout is reached or all minions have returned time_left = timeout_at - int(time.time()) # Wait 0 == forever, use a minimum of 1s wait = max(1, time_left) jid_tag = 'salt/job/{0}'.format(jid) raw = self.event.get_event(wait, jid_tag, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue found.add(raw['id']) ret[raw['id']] = {'ret': raw['return']} ret[raw['id']]['success'] = raw.get('success', False) if 'out' in raw: ret[raw['id']]['out'] = raw['out'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break if int(time.time()) > timeout_at: if verbose or show_timeout: if self.opts.get('minion_data_cache', False) \ or tgt_type in ('glob', 'pcre', 'list'): if len(found) < len(minions): fail = sorted(list(minions.difference(found))) for minion in fail: ret[minion] = { 'out': 'no_return', 'ret': 'Minion did not return' } break time.sleep(0.01) self._clean_up_subscriptions(jid) return ret def get_cli_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, progress=False, show_timeout=False, show_jid=False, **kwargs): ''' Get the returns for the command line interface via the event system ''' log.trace('func get_cli_event_returns()') if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) # lazy load the connected minions connected_minions = None return_count = 0 for ret in self.get_iter_returns(jid, minions, timeout=timeout, tgt=tgt, tgt_type=tgt_type, # (gtmanfred) 
expect_minions is popped here incase it is passed from a client # call. If this is not popped, then it would be passed twice to # get_iter_returns. expect_minions=(kwargs.pop('expect_minions', False) or verbose or show_timeout), **kwargs ): log.debug('return event: %s', ret) return_count = return_count + 1 if progress: for id_, min_ret in six.iteritems(ret): if not min_ret.get('failed') is True: yield {'minion_count': len(minions), 'return_count': return_count} # replace the return structure for missing minions for id_, min_ret in six.iteritems(ret): if min_ret.get('failed') is True: if connected_minions is None: connected_minions = salt.utils.minions.CkMinions(self.opts).connected_ids() if self.opts['minion_data_cache'] \ and salt.cache.factory(self.opts).contains('minions/{0}'.format(id_), 'data') \ and connected_minions \ and id_ not in connected_minions: yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [Not connected]', 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: # don't report syndics as unresponsive minions if not os.path.exists(os.path.join(self.opts['syndic_dir'], id_)): yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [No response]' '\nThe minions may not have all finished running and any ' 'remaining minions will return upon completion. To look ' 'up the return data for this job later, run the following ' 'command:\n\n' 'salt-run jobs.lookup_jid {0}'.format(jid), 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: yield {id_: min_ret} self._clean_up_subscriptions(jid) def get_event_iter_returns(self, jid, minions, timeout=None): ''' Gather the return data from the event system, break hard when timeout is reached. 
''' log.trace('entered - function get_event_iter_returns()') if timeout is None: timeout = self.opts['timeout'] timeout_at = time.time() + timeout found = set() # Check to see if the jid is real, if not return the empty dict if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() # Wait for the hosts to check in while True: raw = self.event.get_event(timeout, auto_reconnect=self.auto_reconnect) if raw is None or time.time() > timeout_at: # Timeout reached break if 'minions' in raw.get('data', {}): continue try: found.add(raw['id']) ret = {raw['id']: {'ret': raw['return']}} except KeyError: # Ignore other erroneous messages continue if 'out' in raw: ret[raw['id']]['out'] = raw['out'] yield ret time.sleep(0.02) def _prep_pub(self, tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs): ''' Set up the payload_kwargs to be sent down to the master ''' if tgt_type == 'nodegroup': if tgt not in self.opts['nodegroups']: conf_file = self.opts.get( 'conf_file', 'the master config file' ) raise SaltInvocationError( 'Node group {0} unavailable in {1}'.format( tgt, conf_file ) ) tgt = salt.utils.minions.nodegroup_comp(tgt, self.opts['nodegroups']) tgt_type = 'compound' # Convert a range expression to a list of nodes and change expression # form to list if tgt_type == 'range' and HAS_RANGE: tgt = self._convert_range_to_list(tgt) tgt_type = 'list' # If an external job cache is specified add it to the ret list if self.opts.get('ext_job_cache'): if ret: ret += ',{0}'.format(self.opts['ext_job_cache']) else: ret = self.opts['ext_job_cache'] # format the payload - make a function that does this in the payload # module # Generate the standard keyword args to feed to format_payload payload_kwargs = {'cmd': 'publish', 'tgt': tgt, 'fun': fun, 'arg': arg, 'key': self.key, 'tgt_type': tgt_type, 'ret': ret, 'jid': jid} # if kwargs are passed, pack them. 
if kwargs: payload_kwargs['kwargs'] = kwargs # If we have a salt user, add it to the payload if self.opts['syndic_master'] and 'user' in kwargs: payload_kwargs['user'] = kwargs['user'] elif self.salt_user: payload_kwargs['user'] = self.salt_user # If we're a syndication master, pass the timeout if self.opts['order_masters']: payload_kwargs['to'] = timeout return payload_kwargs def pub(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, listen=False, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.ReqChannel.factory(self.opts, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. 
# If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError as err: log.error(err) raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: return payload self.key = key payload_kwargs['key'] = self.key payload = channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: return payload # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() return {'jid': payload['load']['jid'], 'minions': payload['load']['minions']} @tornado.gen.coroutine def pub_async(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, io_loop=None, listen=True, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. 
fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.AsyncReqChannel.factory(self.opts, io_loop=io_loop, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. # If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = yield channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError: raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! 
Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: raise tornado.gen.Return(payload) self.key = key payload_kwargs['key'] = self.key payload = yield channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: raise tornado.gen.Return(payload) # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() raise tornado.gen.Return({'jid': payload['load']['jid'], 'minions': payload['load']['minions']}) def __del__(self): # This IS really necessary! # When running tests, if self.events is not destroyed, we leak 2 # threads per test case which uses self.client if hasattr(self, 'event'): # The call below will take care of calling 'self.event.destroy()' del self.event def _clean_up_subscriptions(self, job_id): if self.opts.get('order_masters'): self.event.unsubscribe('syndic/.*/{0}'.format(job_id), 'regex') self.event.unsubscribe('salt/job/{0}'.format(job_id))
saltstack/salt
salt/client/__init__.py
LocalClient.cmd_cli
python
def cmd_cli(
        self,
        tgt,
        fun,
        arg=(),
        timeout=None,
        tgt_type='glob',
        ret='',
        verbose=False,
        kwarg=None,
        progress=False,
        **kwargs):
    '''
    Used by the :command:`salt` CLI. This method returns minion returns
    as they come back and attempts to block until all minions return.

    The function signature is the same as :py:meth:`cmd` with the
    following exceptions.

    :param verbose: Print extra information about the running command
    :returns: A generator yielding each minion return dictionary as it
        arrives on the event bus
    '''
    # Remember whether the event publisher was already connected before
    # this call, so the ``finally`` block below only closes it if this
    # call is the one that opened it.
    was_listening = self.event.cpub
    if fun.startswith('state.'):
        # For state runs, pre-build a copy-pasteable target expression
        # (CLI flag + target) used in the Ctrl-c exit message so the user
        # can issue a matching ``state.soft_kill`` command.
        ref = {'compound': '-C',
               'glob': '',
               'grain': '-G',
               'grain_pcre': '-P',
               'ipcidr': '-S',
               'list': '-L',
               'nodegroup': '-N',
               'pcre': '-E',
               'pillar': '-I',
               'pillar_pcre': '-J'}
        if HAS_RANGE:
            ref['range'] = '-R'
        # NOTE(review): ``ref[tgt_type]`` raises KeyError for any
        # tgt_type not listed above (e.g. 'range' when the range library
        # is unavailable) -- confirm callers only pass supported types.
        if ref[tgt_type].startswith('-'):
            # Flag-style target types need the flag plus a quoted target.
            self.target_data = "{0} '{1}'".format(
                ref[tgt_type],
                ','.join(tgt) if isinstance(tgt, list) else tgt)
        else:
            # Glob targeting has no flag; just normalize lists to CSV.
            self.target_data = ','.join(tgt) if isinstance(tgt, list) else tgt
    else:
        self.target_data = ''
    try:
        # Publish the job and subscribe to its return events
        # (listen=True installs the event subscription).
        self.pub_data = self.run_job(
            tgt,
            fun,
            arg,
            tgt_type,
            ret,
            timeout,
            kwarg=kwarg,
            listen=True,
            **kwargs)
        if not self.pub_data:
            # Publish failed (auth error, no matching minions, ...);
            # surface the empty result to the caller.
            yield self.pub_data
        else:
            try:
                # Stream minion returns as they come back, skipping
                # empty events.
                for fn_ret in self.get_cli_event_returns(
                        self.pub_data['jid'],
                        self.pub_data['minions'],
                        self._get_timeout(timeout),
                        tgt,
                        tgt_type,
                        verbose,
                        progress,
                        **kwargs):

                    if not fn_ret:
                        continue

                    yield fn_ret
            except KeyboardInterrupt:
                # On Ctrl-c the job keeps running on the minions; tell
                # the user how to look the results up later (and, for
                # state runs, how to soft-kill the run) before exiting.
                exit_msg = (
                    '\nExiting gracefully on Ctrl-c'
                    '\n'
                    'This job\'s jid is: {0}\n'
                    'The minions may not have all finished running and any '
                    'remaining minions will return upon completion.\n\n'
                    'To look up the return data for this job later, run the '
                    'following command:\n'
                    'salt-run jobs.lookup_jid {0}'.format(self.pub_data['jid']))
                if self.target_data:
                    exit_msg += (
                        '\n\n'
                        'To set up the state run to safely exit, run the following command:\n'
                        'salt {0} state.soft_kill {1}'.format(self.target_data, self.pub_data['jid']))
                raise SystemExit(exit_msg)
    finally:
        # Only tear down the publisher connection if this call opened it.
        if not was_listening:
            self.event.close_pub()
Used by the :command:`salt` CLI. This method returns minion returns as they come back and attempts to block until all minions return. The function signature is the same as :py:meth:`cmd` with the following exceptions. :param verbose: Print extra information about the running command :returns: A generator
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L726-L820
[ "def _get_timeout(self, timeout):\n '''\n Return the timeout to use\n '''\n if timeout is None:\n return self.opts['timeout']\n if isinstance(timeout, int):\n return timeout\n if isinstance(timeout, six.string_types):\n try:\n return int(timeout)\n except ValueError:\n return self.opts['timeout']\n # Looks like the timeout is invalid, use config\n return self.opts['timeout']\n", "def run_job(\n self,\n tgt,\n fun,\n arg=(),\n tgt_type='glob',\n ret='',\n timeout=None,\n jid='',\n kwarg=None,\n listen=False,\n **kwargs):\n '''\n Asynchronously send a command to connected minions\n\n Prep the job directory and publish a command to any targeted minions.\n\n :return: A dictionary of (validated) ``pub_data`` or an empty\n dictionary on failure. The ``pub_data`` contains the job ID and a\n list of all minions that are expected to return data.\n\n .. code-block:: python\n\n >>> local.run_job('*', 'test.sleep', [300])\n {'jid': '20131219215650131543', 'minions': ['jerry']}\n '''\n arg = salt.utils.args.parse_input(arg, kwargs=kwarg)\n\n try:\n pub_data = self.pub(\n tgt,\n fun,\n arg,\n tgt_type,\n ret,\n jid=jid,\n timeout=self._get_timeout(timeout),\n listen=listen,\n **kwargs)\n except SaltClientError:\n # Re-raise error with specific message\n raise SaltClientError(\n 'The salt master could not be contacted. 
Is master running?'\n )\n except AuthenticationError as err:\n raise AuthenticationError(err)\n except AuthorizationError as err:\n raise AuthorizationError(err)\n except Exception as general_exception:\n # Convert to generic client error and pass along message\n raise SaltClientError(general_exception)\n\n return self._check_pub_data(pub_data, listen=listen)\n", "def get_cli_event_returns(\n self,\n jid,\n minions,\n timeout=None,\n tgt='*',\n tgt_type='glob',\n verbose=False,\n progress=False,\n show_timeout=False,\n show_jid=False,\n **kwargs):\n '''\n Get the returns for the command line interface via the event system\n '''\n log.trace('func get_cli_event_returns()')\n\n if verbose:\n msg = 'Executing job with jid {0}'.format(jid)\n print(msg)\n print('-' * len(msg) + '\\n')\n elif show_jid:\n print('jid: {0}'.format(jid))\n\n # lazy load the connected minions\n connected_minions = None\n return_count = 0\n\n for ret in self.get_iter_returns(jid,\n minions,\n timeout=timeout,\n tgt=tgt,\n tgt_type=tgt_type,\n # (gtmanfred) expect_minions is popped here incase it is passed from a client\n # call. 
If this is not popped, then it would be passed twice to\n # get_iter_returns.\n expect_minions=(kwargs.pop('expect_minions', False) or verbose or show_timeout),\n **kwargs\n ):\n log.debug('return event: %s', ret)\n return_count = return_count + 1\n if progress:\n for id_, min_ret in six.iteritems(ret):\n if not min_ret.get('failed') is True:\n yield {'minion_count': len(minions), 'return_count': return_count}\n # replace the return structure for missing minions\n for id_, min_ret in six.iteritems(ret):\n if min_ret.get('failed') is True:\n if connected_minions is None:\n connected_minions = salt.utils.minions.CkMinions(self.opts).connected_ids()\n if self.opts['minion_data_cache'] \\\n and salt.cache.factory(self.opts).contains('minions/{0}'.format(id_), 'data') \\\n and connected_minions \\\n and id_ not in connected_minions:\n\n yield {\n id_: {\n 'out': 'no_return',\n 'ret': 'Minion did not return. [Not connected]',\n 'retcode': salt.defaults.exitcodes.EX_GENERIC\n }\n }\n else:\n # don't report syndics as unresponsive minions\n if not os.path.exists(os.path.join(self.opts['syndic_dir'], id_)):\n yield {\n id_: {\n 'out': 'no_return',\n 'ret': 'Minion did not return. [No response]'\n '\\nThe minions may not have all finished running and any '\n 'remaining minions will return upon completion. To look '\n 'up the return data for this job later, run the following '\n 'command:\\n\\n'\n 'salt-run jobs.lookup_jid {0}'.format(jid),\n 'retcode': salt.defaults.exitcodes.EX_GENERIC\n }\n }\n else:\n yield {id_: min_ret}\n\n self._clean_up_subscriptions(jid)\n" ]
class LocalClient(object): ''' The interface used by the :command:`salt` CLI tool on the Salt Master ``LocalClient`` is used to send a command to Salt minions to execute :ref:`execution modules <all-salt.modules>` and return the results to the Salt Master. Importing and using ``LocalClient`` must be done on the same machine as the Salt Master and it must be done using the same user that the Salt Master is running as. (Unless :conf_master:`external_auth` is configured and authentication credentials are included in the execution). .. note:: The LocalClient uses a Tornado IOLoop, this can create issues when using the LocalClient inside an existing IOLoop. If creating the LocalClient in partnership with another IOLoop either create the IOLoop before creating the LocalClient, or when creating the IOLoop use ioloop.current() which will return the ioloop created by LocalClient. .. code-block:: python import salt.client local = salt.client.LocalClient() local.cmd('*', 'test.fib', [10]) ''' def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, 'master'), mopts=None, skip_perm_errors=False, io_loop=None, keep_loop=False, auto_reconnect=False): ''' :param IOLoop io_loop: io_loop used for events. Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. 
''' if mopts: self.opts = mopts else: if os.path.isdir(c_path): log.warning( '%s expects a file path not a directory path(%s) to ' 'its \'c_path\' keyword argument', self.__class__.__name__, c_path ) self.opts = salt.config.client_config(c_path) self.serial = salt.payload.Serial(self.opts) self.salt_user = salt.utils.user.get_specific_user() self.skip_perm_errors = skip_perm_errors self.key = self.__read_master_key() self.auto_reconnect = auto_reconnect self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False, io_loop=io_loop, keep_loop=keep_loop) self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils) self.returners = salt.loader.returners(self.opts, self.functions) def __read_master_key(self): ''' Read in the rotating master authentication key ''' key_user = self.salt_user if key_user == 'root': if self.opts.get('user', 'root') != 'root': key_user = self.opts.get('user', 'root') if key_user.startswith('sudo_'): key_user = self.opts.get('user', 'root') if salt.utils.platform.is_windows(): # The username may contain '\' if it is in Windows # 'DOMAIN\username' format. Fix this for the keyfile path. 
key_user = key_user.replace('\\', '_') keyfile = os.path.join(self.opts['cachedir'], '.{0}_key'.format(key_user)) try: # Make sure all key parent directories are accessible salt.utils.verify.check_path_traversal(self.opts['cachedir'], key_user, self.skip_perm_errors) with salt.utils.files.fopen(keyfile, 'r') as key: return salt.utils.stringutils.to_unicode(key.read()) except (OSError, IOError, SaltClientError): # Fall back to eauth return '' def _convert_range_to_list(self, tgt): ''' convert a seco.range range into a list target ''' range_ = seco.range.Range(self.opts['range_server']) try: return range_.expand(tgt) except seco.range.RangeException as err: print('Range server exception: {0}'.format(err)) return [] def _get_timeout(self, timeout): ''' Return the timeout to use ''' if timeout is None: return self.opts['timeout'] if isinstance(timeout, int): return timeout if isinstance(timeout, six.string_types): try: return int(timeout) except ValueError: return self.opts['timeout'] # Looks like the timeout is invalid, use config return self.opts['timeout'] def gather_job_info(self, jid, tgt, tgt_type, listen=True, **kwargs): ''' Return the information about a given job ''' log.debug('Checking whether jid %s is still running', jid) timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) pub_data = self.run_job(tgt, 'saltutil.find_job', arg=[jid], tgt_type=tgt_type, timeout=timeout, listen=listen, **kwargs ) if 'jid' in pub_data: self.event.subscribe(pub_data['jid']) return pub_data def _check_pub_data(self, pub_data, listen=True): ''' Common checks on the pub_data data structure returned from running pub ''' if pub_data == '': # Failed to authenticate, this could be a bunch of things raise EauthAuthenticationError( 'Failed to authenticate! This is most likely because this ' 'user is not permitted to execute commands, but there is a ' 'small possibility that a disk error occurred (check ' 'disk/inode usage).' 
) # Failed to connect to the master and send the pub if 'error' in pub_data: print(pub_data['error']) log.debug('_check_pub_data() error: %s', pub_data['error']) return {} elif 'jid' not in pub_data: return {} if pub_data['jid'] == '0': print('Failed to connect to the Master, ' 'is the Salt Master running?') return {} # If we order masters (via a syndic), don't short circuit if no minions # are found if not self.opts.get('order_masters'): # Check for no minions if not pub_data['minions']: print('No minions matched the target. ' 'No command was sent, no jid was assigned.') return {} # don't install event subscription listeners when the request is asynchronous # and doesn't care. this is important as it will create event leaks otherwise if not listen: return pub_data if self.opts.get('order_masters'): self.event.subscribe('syndic/.*/{0}'.format(pub_data['jid']), 'regex') self.event.subscribe('salt/job/{0}'.format(pub_data['jid'])) return pub_data def run_job( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=False, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = self.pub( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) return self._check_pub_data(pub_data, listen=listen) def gather_minions(self, tgt, expr_form): _res = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form) return _res['minions'] @tornado.gen.coroutine def run_job_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=True, io_loop=None, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job_async('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = yield self.pub_async( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), io_loop=io_loop, listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) raise tornado.gen.Return(self._check_pub_data(pub_data, listen=listen)) def cmd_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', kwarg=None, **kwargs): ''' Asynchronously send a command to connected minions The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A job ID or 0 on failure. .. code-block:: python >>> local.cmd_async('*', 'test.sleep', [300]) '20131219215921857715' ''' pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, jid=jid, kwarg=kwarg, listen=False, **kwargs) try: return pub_data['jid'] except KeyError: return 0 def cmd_subset( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, sub=3, cli=False, progress=False, full_return=False, **kwargs): ''' Execute a command on a random subset of the targeted systems The function signature is the same as :py:meth:`cmd` with the following exceptions. :param sub: The number of systems to execute on :param cli: When this is set to True, a generator is returned, otherwise a dictionary of the minion returns is returned .. 
code-block:: python >>> SLC.cmd_subset('*', 'test.ping', sub=1) {'jerry': True} ''' minion_ret = self.cmd(tgt, 'sys.list_functions', tgt_type=tgt_type, **kwargs) minions = list(minion_ret) random.shuffle(minions) f_tgt = [] for minion in minions: if fun in minion_ret[minion]: f_tgt.append(minion) if len(f_tgt) >= sub: break func = self.cmd if cli: func = self.cmd_cli return func( f_tgt, fun, arg, tgt_type='list', ret=ret, kwarg=kwarg, progress=progress, full_return=full_return, **kwargs) def cmd_batch( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, batch='10%', **kwargs): ''' Iteratively execute a command on subsets of minions at a time The function signature is the same as :py:meth:`cmd` with the following exceptions. :param batch: The batch identifier of systems to execute on :returns: A generator of minion returns .. code-block:: python >>> returns = local.cmd_batch('*', 'state.highstate', batch='10%') >>> for ret in returns: ... print(ret) {'jerry': {...}} {'dave': {...}} {'stewart': {...}} ''' # Late import - not used anywhere else in this file import salt.cli.batch opts = salt.cli.batch.batch_get_opts( tgt, fun, batch, self.opts, arg=arg, tgt_type=tgt_type, ret=ret, kwarg=kwarg, **kwargs) eauth = salt.cli.batch.batch_get_eauth(kwargs) arg = salt.utils.args.parse_input(arg, kwargs=kwarg) opts = {'tgt': tgt, 'fun': fun, 'arg': arg, 'tgt_type': tgt_type, 'ret': ret, 'batch': batch, 'failhard': kwargs.get('failhard', False), 'raw': kwargs.get('raw', False)} if 'timeout' in kwargs: opts['timeout'] = kwargs['timeout'] if 'gather_job_timeout' in kwargs: opts['gather_job_timeout'] = kwargs['gather_job_timeout'] if 'batch_wait' in kwargs: opts['batch_wait'] = int(kwargs['batch_wait']) eauth = {} if 'eauth' in kwargs: eauth['eauth'] = kwargs.pop('eauth') if 'username' in kwargs: eauth['username'] = kwargs.pop('username') if 'password' in kwargs: eauth['password'] = kwargs.pop('password') if 'token' in kwargs: eauth['token'] = kwargs.pop('token') for key, 
val in six.iteritems(self.opts): if key not in opts: opts[key] = val batch = salt.cli.batch.Batch(opts, eauth=eauth, quiet=True) for ret in batch.run(): yield ret def cmd(self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', jid='', full_return=False, kwarg=None, **kwargs): ''' Synchronously execute a command on targeted minions The cmd method will execute and wait for the timeout period for all minions to reply, then it will return all minion data at once. .. code-block:: python >>> import salt.client >>> local = salt.client.LocalClient() >>> local.cmd('*', 'cmd.run', ['whoami']) {'jerry': 'root'} With extra keyword arguments for the command function to be run: .. code-block:: python local.cmd('*', 'test.arg', ['arg1', 'arg2'], kwarg={'foo': 'bar'}) Compound commands can be used for multiple executions in a single publish. Function names and function arguments are provided in separate lists but the index values must correlate and an empty list must be used if no arguments are required. .. code-block:: python >>> local.cmd('*', [ 'grains.items', 'sys.doc', 'cmd.run', ], [ [], [], ['uptime'], ]) :param tgt: Which minions to target for the execution. Default is shell glob. Modified by the ``tgt_type`` option. :type tgt: string or list :param fun: The module and function to call on the specified minions of the form ``module.function``. For example ``test.ping`` or ``grains.items``. Compound commands Multiple functions may be called in a single publish by passing a list of commands. This can dramatically lower overhead and speed up the application communicating with Salt. This requires that the ``arg`` param is a list of lists. The ``fun`` list and the ``arg`` list must correlate by index meaning a function that does not take arguments must still have a corresponding empty list at the expected index. :type fun: string or list of strings :param arg: A list of arguments to pass to the remote function. 
If the function takes no arguments ``arg`` may be omitted except when executing a compound command. :type arg: list or list-of-lists :param timeout: Seconds to wait after the last minion returns but before all minions return. :param tgt_type: The type of ``tgt``. Allowed values: * ``glob`` - Bash glob completion - Default * ``pcre`` - Perl style regular expression * ``list`` - Python list of hosts * ``grain`` - Match based on a grain comparison * ``grain_pcre`` - Grain comparison with a regex * ``pillar`` - Pillar data comparison * ``pillar_pcre`` - Pillar data comparison with a regex * ``nodegroup`` - Match on nodegroup * ``range`` - Use a Range server for matching * ``compound`` - Pass a compound match string * ``ipcidr`` - Match based on Subnet (CIDR notation) or IPv4 address. .. versionchanged:: 2017.7.0 Renamed from ``expr_form`` to ``tgt_type`` :param ret: The returner to use. The value passed can be single returner, or a comma delimited list of returners to call in order on the minions :param kwarg: A dictionary with keyword arguments for the function. :param full_return: Output the job return only (default) or the full return including exit code and other job metadata. :param kwargs: Optional keyword arguments. Authentication credentials may be passed when using :conf_master:`external_auth`. For example: ``local.cmd('*', 'test.ping', username='saltdev', password='saltdev', eauth='pam')``. Or: ``local.cmd('*', 'test.ping', token='5871821ea51754fdcea8153c1c745433')`` :returns: A dictionary with the result of the execution, keyed by minion ID. A compound command will return a sub-dictionary keyed by function name. 
''' was_listening = self.event.cpub try: pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, timeout, jid, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data ret = {} for fn_ret in self.get_cli_event_returns( pub_data['jid'], pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, **kwargs): if fn_ret: for mid, data in six.iteritems(fn_ret): ret[mid] = (data if full_return else data.get('ret', {})) for failed in list(set(pub_data['minions']) - set(ret)): ret[failed] = False return ret finally: if not was_listening: self.event.close_pub() def cmd_iter( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, **kwargs): ''' Yields the individual minion returns as they come in The function signature is the same as :py:meth:`cmd` with the following exceptions. Normally :py:meth:`cmd_iter` does not yield results for minions that are not connected. If you want it to return results for disconnected minions set `expect_minions=True` in `kwargs`. :return: A generator yielding the individual minion returns .. code-block:: python >>> ret = local.cmd_iter('*', 'test.ping') >>> for i in ret: ... print(i) {'jerry': {'ret': True}} {'dave': {'ret': True}} {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: if kwargs.get('yield_pub_data'): yield pub_data for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=self._get_timeout(timeout), tgt=tgt, tgt_type=tgt_type, **kwargs): if not fn_ret: continue yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_iter_no_block( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, show_jid=False, verbose=False, **kwargs): ''' Yields the individual minion returns as they come in, or None when no returns are available. 
The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A generator yielding the individual minion returns, or None when no returns are available. This allows for actions to be injected in between minion returns. .. code-block:: python >>> ret = local.cmd_iter_no_block('*', 'test.ping') >>> for i in ret: ... print(i) None {'jerry': {'ret': True}} {'dave': {'ret': True}} None {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=timeout, tgt=tgt, tgt_type=tgt_type, block=False, **kwargs): if fn_ret and any([show_jid, verbose]): for minion in fn_ret: fn_ret[minion]['jid'] = pub_data['jid'] yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_full_return( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, **kwargs): ''' Execute a salt command and return ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data return (self.get_cli_static_event_returns(pub_data['jid'], pub_data['minions'], timeout, tgt, tgt_type, verbose)) finally: if not was_listening: self.event.close_pub() def get_cli_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_jid=False, **kwargs): ''' Starts a watcher looking at the return data for a specified JID :returns: all of the information for the JID ''' if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] fret = {} # make sure the minions is a set (since we do set operations on it) 
minions = set(minions) found = set() # start this before the cache lookup-- in case new stuff comes in event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) # get the info from the cache ret = self.get_cache_returns(jid) if ret != {}: found.update(set(ret)) yield ret # if you have all the returns, stop if len(found.intersection(minions)) >= len(minions): raise StopIteration() # otherwise, get them from the event system for event in event_iter: if event != {}: found.update(set(event)) yield event if len(found.intersection(minions)) >= len(minions): self._clean_up_subscriptions(jid) raise StopIteration() # TODO: tests!! def get_returns_no_block( self, tag, match_type=None): ''' Raw function to just return events of jid excluding timeout logic Yield either the raw event data or None Pass a list of additional regular expressions as `tags_regex` to search the event bus for non-return data, such as minion lists returned from syndics. ''' while True: raw = self.event.get_event(wait=0.01, tag=tag, match_type=match_type, full=True, no_block=True, auto_reconnect=self.auto_reconnect) yield raw def get_iter_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', expect_minions=False, block=True, **kwargs): ''' Watch the event system and return job data as it comes in :returns: all of the information for the JID ''' if not isinstance(minions, set): if isinstance(minions, six.string_types): minions = set([minions]) elif isinstance(minions, (list, tuple)): minions = set(list(minions)) if timeout is None: timeout = self.opts['timeout'] gather_job_timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) start = int(time.time()) # timeouts per minion, id_ -> timeout time minion_timeouts = {} found = set() missing = set() # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the 
iteration, since the jid is invalid raise StopIteration() except Exception as exc: log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG) # Wait for the hosts to check in last_time = False # iterator for this job's return if self.opts['order_masters']: # If we are a MoM, we need to gather expected minions from downstreams masters. ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex') else: ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid)) # iterator for the info of this job jinfo_iter = [] # open event jids that need to be un-subscribed from later open_jids = set() timeout_at = time.time() + timeout gather_syndic_wait = time.time() + self.opts['syndic_wait'] # are there still minions running the job out there # start as True so that we ping at least once minions_running = True log.debug( 'get_iter_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) while True: # Process events until timeout is reached or all minions have returned for raw in ret_iter: # if we got None, then there were no events if raw is None: break if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) if 'missing' in raw.get('data', {}): missing.update(raw['data']['missing']) continue if 'return' not in raw['data']: continue if kwargs.get('raw', False): found.add(raw['data']['id']) yield raw else: found.add(raw['data']['id']) ret = {raw['data']['id']: {'ret': raw['data']['return']}} if 'out' in raw['data']: ret[raw['data']['id']]['out'] = raw['data']['out'] if 'retcode' in raw['data']: ret[raw['data']['id']]['retcode'] = raw['data']['retcode'] if 'jid' in raw['data']: ret[raw['data']['id']]['jid'] = raw['data']['jid'] if kwargs.get('_cmd_meta', False): ret[raw['data']['id']].update(raw['data']) log.debug('jid %s return from %s', jid, raw['data']['id']) yield ret # if we have all of the returns (and we aren't a syndic), no need for anything fancy if 
len(found.intersection(minions)) >= len(minions) and not self.opts['order_masters']: # All minions have returned, break out of the loop log.debug('jid %s found all minions %s', jid, found) break elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']: if len(found) >= len(minions) and len(minions) > 0 and time.time() > gather_syndic_wait: # There were some minions to find and we found them # However, this does not imply that *all* masters have yet responded with expected minion lists. # Therefore, continue to wait up to the syndic_wait period (calculated in gather_syndic_wait) to see # if additional lower-level masters deliver their lists of expected # minions. break # If we get here we may not have gathered the minion list yet. Keep waiting # for all lower-level masters to respond with their minion lists # let start the timeouts for all remaining minions for id_ in minions - found: # if we have a new minion in the list, make sure it has a timeout if id_ not in minion_timeouts: minion_timeouts[id_] = time.time() + timeout # if the jinfo has timed out and some minions are still running the job # re-do the ping if time.time() > timeout_at and minions_running: # since this is a new ping, no one has responded yet jinfo = self.gather_job_info(jid, list(minions - found), 'list', **kwargs) minions_running = False # if we weren't assigned any jid that means the master thinks # we have nothing to send if 'jid' not in jinfo: jinfo_iter = [] else: jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid'])) timeout_at = time.time() + gather_job_timeout # if you are a syndic, wait a little longer if self.opts['order_masters']: timeout_at += self.opts.get('syndic_wait', 1) # check for minions that are running the job still for raw in jinfo_iter: # if there are no more events, lets stop waiting for the jinfo if raw is None: break try: if raw['data']['retcode'] > 0: log.error('saltutil returning errors on minion %s', raw['data']['id']) 
minions.remove(raw['data']['id']) break except KeyError as exc: # This is a safe pass. We're just using the try/except to # avoid having to deep-check for keys. missing_key = exc.__str__().strip('\'"') if missing_key == 'retcode': log.debug('retcode missing from client return') else: log.debug( 'Passing on saltutil error. Key \'%s\' missing ' 'from client return. This may be an error in ' 'the client.', missing_key ) # Keep track of the jid events to unsubscribe from later open_jids.add(jinfo['jid']) # TODO: move to a library?? if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue if 'syndic' in raw.get('data', {}): minions.update(raw['syndic']) continue if 'return' not in raw.get('data', {}): continue # if the job isn't running there anymore... don't count if raw['data']['return'] == {}: continue # if the minion throws an exception containing the word "return" # the master will try to handle the string as a dict in the next # step. Check if we have a string, log the issue and continue. 
if isinstance(raw['data']['return'], six.string_types): log.error("unexpected return from minion: %s", raw) continue if 'return' in raw['data']['return'] and \ raw['data']['return']['return'] == {}: continue # if we didn't originally target the minion, lets add it to the list if raw['data']['id'] not in minions: minions.add(raw['data']['id']) # update this minion's timeout, as long as the job is still running minion_timeouts[raw['data']['id']] = time.time() + timeout # a minion returned, so we know its running somewhere minions_running = True # if we have hit gather_job_timeout (after firing the job) AND # if we have hit all minion timeouts, lets call it now = time.time() # if we have finished waiting, and no minions are running the job # then we need to see if each minion has timedout done = (now > timeout_at) and not minions_running if done: # if all minions have timeod out for id_ in minions - found: if now < minion_timeouts[id_]: done = False break if done: break # don't spin if block: time.sleep(0.01) else: yield # If there are any remaining open events, clean them up. if open_jids: for jid in open_jids: self.event.unsubscribe(jid) if expect_minions: for minion in list((minions - found)): yield {minion: {'failed': True}} # Filter out any minions marked as missing for which we received # returns (prevents false events sent due to higher-level masters not # knowing about lower-level minions). 
missing -= found # Report on missing minions if missing: for minion in missing: yield {minion: {'failed': True}} def get_returns( self, jid, minions, timeout=None): ''' Get the returns for the command line interface via the event system ''' minions = set(minions) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout log.debug( 'get_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Master job cache returner [{0}] failed to verify jid. ' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: time_left = timeout_at - int(time.time()) wait = max(1, time_left) raw = self.event.get_event(wait, jid, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: found.add(raw['id']) ret[raw['id']] = raw['return'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break if int(time.time()) > timeout_at: log.info( 'jid %s minions %s did not return in time', jid, (minions - found) ) break time.sleep(0.01) return ret def get_full_returns(self, jid, minions, timeout=None): ''' This method starts off a watcher looking at the return data for a specified jid, it returns all of the information for the jid ''' # TODO: change this from ret to return... or the other way. 
# Its inconsistent, we should pick one ret = {} # create the iterator-- since we want to get anyone in the middle event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Returner {0} could not fetch jid data. ' 'Exception details: {1}'.format( self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # if we have all the minion returns, lets just return if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise lets use the listener we created above to get the rest for event_ret in event_iter: # if nothing in the event_ret, skip if event_ret == {}: time.sleep(0.02) continue for minion, m_data in six.iteritems(event_ret): if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # are we done yet? if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise we hit the timeout, return what we have return ret def get_cache_returns(self, jid): ''' Execute a single pass to gather the contents of the job cache ''' ret = {} try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Could not examine master job cache. ' 'Error occurred in {0} returner. 
' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data return ret def get_cli_static_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_timeout=False, show_jid=False): ''' Get the returns for the command line interface via the event system ''' log.trace('entered - function get_cli_static_event_returns()') minions = set(minions) if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Load could not be retrieved from ' 'returner {0}. 
Exception details: {1}'.format( self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: # Process events until timeout is reached or all minions have returned time_left = timeout_at - int(time.time()) # Wait 0 == forever, use a minimum of 1s wait = max(1, time_left) jid_tag = 'salt/job/{0}'.format(jid) raw = self.event.get_event(wait, jid_tag, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue found.add(raw['id']) ret[raw['id']] = {'ret': raw['return']} ret[raw['id']]['success'] = raw.get('success', False) if 'out' in raw: ret[raw['id']]['out'] = raw['out'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break if int(time.time()) > timeout_at: if verbose or show_timeout: if self.opts.get('minion_data_cache', False) \ or tgt_type in ('glob', 'pcre', 'list'): if len(found) < len(minions): fail = sorted(list(minions.difference(found))) for minion in fail: ret[minion] = { 'out': 'no_return', 'ret': 'Minion did not return' } break time.sleep(0.01) self._clean_up_subscriptions(jid) return ret def get_cli_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, progress=False, show_timeout=False, show_jid=False, **kwargs): ''' Get the returns for the command line interface via the event system ''' log.trace('func get_cli_event_returns()') if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) # lazy load the connected minions connected_minions = None return_count = 0 for ret in self.get_iter_returns(jid, minions, timeout=timeout, tgt=tgt, tgt_type=tgt_type, # (gtmanfred) 
expect_minions is popped here incase it is passed from a client # call. If this is not popped, then it would be passed twice to # get_iter_returns. expect_minions=(kwargs.pop('expect_minions', False) or verbose or show_timeout), **kwargs ): log.debug('return event: %s', ret) return_count = return_count + 1 if progress: for id_, min_ret in six.iteritems(ret): if not min_ret.get('failed') is True: yield {'minion_count': len(minions), 'return_count': return_count} # replace the return structure for missing minions for id_, min_ret in six.iteritems(ret): if min_ret.get('failed') is True: if connected_minions is None: connected_minions = salt.utils.minions.CkMinions(self.opts).connected_ids() if self.opts['minion_data_cache'] \ and salt.cache.factory(self.opts).contains('minions/{0}'.format(id_), 'data') \ and connected_minions \ and id_ not in connected_minions: yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [Not connected]', 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: # don't report syndics as unresponsive minions if not os.path.exists(os.path.join(self.opts['syndic_dir'], id_)): yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [No response]' '\nThe minions may not have all finished running and any ' 'remaining minions will return upon completion. To look ' 'up the return data for this job later, run the following ' 'command:\n\n' 'salt-run jobs.lookup_jid {0}'.format(jid), 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: yield {id_: min_ret} self._clean_up_subscriptions(jid) def get_event_iter_returns(self, jid, minions, timeout=None): ''' Gather the return data from the event system, break hard when timeout is reached. 
''' log.trace('entered - function get_event_iter_returns()') if timeout is None: timeout = self.opts['timeout'] timeout_at = time.time() + timeout found = set() # Check to see if the jid is real, if not return the empty dict if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() # Wait for the hosts to check in while True: raw = self.event.get_event(timeout, auto_reconnect=self.auto_reconnect) if raw is None or time.time() > timeout_at: # Timeout reached break if 'minions' in raw.get('data', {}): continue try: found.add(raw['id']) ret = {raw['id']: {'ret': raw['return']}} except KeyError: # Ignore other erroneous messages continue if 'out' in raw: ret[raw['id']]['out'] = raw['out'] yield ret time.sleep(0.02) def _prep_pub(self, tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs): ''' Set up the payload_kwargs to be sent down to the master ''' if tgt_type == 'nodegroup': if tgt not in self.opts['nodegroups']: conf_file = self.opts.get( 'conf_file', 'the master config file' ) raise SaltInvocationError( 'Node group {0} unavailable in {1}'.format( tgt, conf_file ) ) tgt = salt.utils.minions.nodegroup_comp(tgt, self.opts['nodegroups']) tgt_type = 'compound' # Convert a range expression to a list of nodes and change expression # form to list if tgt_type == 'range' and HAS_RANGE: tgt = self._convert_range_to_list(tgt) tgt_type = 'list' # If an external job cache is specified add it to the ret list if self.opts.get('ext_job_cache'): if ret: ret += ',{0}'.format(self.opts['ext_job_cache']) else: ret = self.opts['ext_job_cache'] # format the payload - make a function that does this in the payload # module # Generate the standard keyword args to feed to format_payload payload_kwargs = {'cmd': 'publish', 'tgt': tgt, 'fun': fun, 'arg': arg, 'key': self.key, 'tgt_type': tgt_type, 'ret': ret, 'jid': jid} # if kwargs are passed, pack them. 
if kwargs: payload_kwargs['kwargs'] = kwargs # If we have a salt user, add it to the payload if self.opts['syndic_master'] and 'user' in kwargs: payload_kwargs['user'] = kwargs['user'] elif self.salt_user: payload_kwargs['user'] = self.salt_user # If we're a syndication master, pass the timeout if self.opts['order_masters']: payload_kwargs['to'] = timeout return payload_kwargs def pub(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, listen=False, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.ReqChannel.factory(self.opts, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. 
# If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError as err: log.error(err) raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: return payload self.key = key payload_kwargs['key'] = self.key payload = channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: return payload # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() return {'jid': payload['load']['jid'], 'minions': payload['load']['minions']} @tornado.gen.coroutine def pub_async(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, io_loop=None, listen=True, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. 
fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.AsyncReqChannel.factory(self.opts, io_loop=io_loop, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. # If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = yield channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError: raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! 
Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: raise tornado.gen.Return(payload) self.key = key payload_kwargs['key'] = self.key payload = yield channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: raise tornado.gen.Return(payload) # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() raise tornado.gen.Return({'jid': payload['load']['jid'], 'minions': payload['load']['minions']}) def __del__(self): # This IS really necessary! # When running tests, if self.events is not destroyed, we leak 2 # threads per test case which uses self.client if hasattr(self, 'event'): # The call below will take care of calling 'self.event.destroy()' del self.event def _clean_up_subscriptions(self, job_id): if self.opts.get('order_masters'): self.event.unsubscribe('syndic/.*/{0}'.format(job_id), 'regex') self.event.unsubscribe('salt/job/{0}'.format(job_id))
saltstack/salt
salt/client/__init__.py
LocalClient.cmd_iter
python
def cmd_iter( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, **kwargs): ''' Yields the individual minion returns as they come in The function signature is the same as :py:meth:`cmd` with the following exceptions. Normally :py:meth:`cmd_iter` does not yield results for minions that are not connected. If you want it to return results for disconnected minions set `expect_minions=True` in `kwargs`. :return: A generator yielding the individual minion returns .. code-block:: python >>> ret = local.cmd_iter('*', 'test.ping') >>> for i in ret: ... print(i) {'jerry': {'ret': True}} {'dave': {'ret': True}} {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: if kwargs.get('yield_pub_data'): yield pub_data for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=self._get_timeout(timeout), tgt=tgt, tgt_type=tgt_type, **kwargs): if not fn_ret: continue yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub()
Yields the individual minion returns as they come in The function signature is the same as :py:meth:`cmd` with the following exceptions. Normally :py:meth:`cmd_iter` does not yield results for minions that are not connected. If you want it to return results for disconnected minions set `expect_minions=True` in `kwargs`. :return: A generator yielding the individual minion returns .. code-block:: python >>> ret = local.cmd_iter('*', 'test.ping') >>> for i in ret: ... print(i) {'jerry': {'ret': True}} {'dave': {'ret': True}} {'stewart': {'ret': True}}
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L822-L884
[ "def _get_timeout(self, timeout):\n '''\n Return the timeout to use\n '''\n if timeout is None:\n return self.opts['timeout']\n if isinstance(timeout, int):\n return timeout\n if isinstance(timeout, six.string_types):\n try:\n return int(timeout)\n except ValueError:\n return self.opts['timeout']\n # Looks like the timeout is invalid, use config\n return self.opts['timeout']\n", "def run_job(\n self,\n tgt,\n fun,\n arg=(),\n tgt_type='glob',\n ret='',\n timeout=None,\n jid='',\n kwarg=None,\n listen=False,\n **kwargs):\n '''\n Asynchronously send a command to connected minions\n\n Prep the job directory and publish a command to any targeted minions.\n\n :return: A dictionary of (validated) ``pub_data`` or an empty\n dictionary on failure. The ``pub_data`` contains the job ID and a\n list of all minions that are expected to return data.\n\n .. code-block:: python\n\n >>> local.run_job('*', 'test.sleep', [300])\n {'jid': '20131219215650131543', 'minions': ['jerry']}\n '''\n arg = salt.utils.args.parse_input(arg, kwargs=kwarg)\n\n try:\n pub_data = self.pub(\n tgt,\n fun,\n arg,\n tgt_type,\n ret,\n jid=jid,\n timeout=self._get_timeout(timeout),\n listen=listen,\n **kwargs)\n except SaltClientError:\n # Re-raise error with specific message\n raise SaltClientError(\n 'The salt master could not be contacted. 
Is master running?'\n )\n except AuthenticationError as err:\n raise AuthenticationError(err)\n except AuthorizationError as err:\n raise AuthorizationError(err)\n except Exception as general_exception:\n # Convert to generic client error and pass along message\n raise SaltClientError(general_exception)\n\n return self._check_pub_data(pub_data, listen=listen)\n", "def get_iter_returns(\n self,\n jid,\n minions,\n timeout=None,\n tgt='*',\n tgt_type='glob',\n expect_minions=False,\n block=True,\n **kwargs):\n '''\n Watch the event system and return job data as it comes in\n\n :returns: all of the information for the JID\n '''\n if not isinstance(minions, set):\n if isinstance(minions, six.string_types):\n minions = set([minions])\n elif isinstance(minions, (list, tuple)):\n minions = set(list(minions))\n\n if timeout is None:\n timeout = self.opts['timeout']\n gather_job_timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout']))\n start = int(time.time())\n\n # timeouts per minion, id_ -> timeout time\n minion_timeouts = {}\n\n found = set()\n missing = set()\n # Check to see if the jid is real, if not return the empty dict\n try:\n if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}:\n log.warning('jid does not exist')\n yield {}\n # stop the iteration, since the jid is invalid\n raise StopIteration()\n except Exception as exc:\n log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG)\n # Wait for the hosts to check in\n last_time = False\n # iterator for this job's return\n if self.opts['order_masters']:\n # If we are a MoM, we need to gather expected minions from downstreams masters.\n ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex')\n else:\n ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid))\n # iterator for the info of this job\n jinfo_iter = []\n # open event jids that need to be un-subscribed from later\n open_jids = set()\n 
timeout_at = time.time() + timeout\n gather_syndic_wait = time.time() + self.opts['syndic_wait']\n # are there still minions running the job out there\n # start as True so that we ping at least once\n minions_running = True\n log.debug(\n 'get_iter_returns for jid %s sent to %s will timeout at %s',\n jid, minions, datetime.fromtimestamp(timeout_at).time()\n )\n while True:\n # Process events until timeout is reached or all minions have returned\n for raw in ret_iter:\n # if we got None, then there were no events\n if raw is None:\n break\n if 'minions' in raw.get('data', {}):\n minions.update(raw['data']['minions'])\n if 'missing' in raw.get('data', {}):\n missing.update(raw['data']['missing'])\n continue\n if 'return' not in raw['data']:\n continue\n if kwargs.get('raw', False):\n found.add(raw['data']['id'])\n yield raw\n else:\n found.add(raw['data']['id'])\n ret = {raw['data']['id']: {'ret': raw['data']['return']}}\n if 'out' in raw['data']:\n ret[raw['data']['id']]['out'] = raw['data']['out']\n if 'retcode' in raw['data']:\n ret[raw['data']['id']]['retcode'] = raw['data']['retcode']\n if 'jid' in raw['data']:\n ret[raw['data']['id']]['jid'] = raw['data']['jid']\n if kwargs.get('_cmd_meta', False):\n ret[raw['data']['id']].update(raw['data'])\n log.debug('jid %s return from %s', jid, raw['data']['id'])\n yield ret\n\n # if we have all of the returns (and we aren't a syndic), no need for anything fancy\n if len(found.intersection(minions)) >= len(minions) and not self.opts['order_masters']:\n # All minions have returned, break out of the loop\n log.debug('jid %s found all minions %s', jid, found)\n break\n elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']:\n if len(found) >= len(minions) and len(minions) > 0 and time.time() > gather_syndic_wait:\n # There were some minions to find and we found them\n # However, this does not imply that *all* masters have yet responded with expected minion lists.\n # Therefore, continue to wait 
up to the syndic_wait period (calculated in gather_syndic_wait) to see\n # if additional lower-level masters deliver their lists of expected\n # minions.\n break\n # If we get here we may not have gathered the minion list yet. Keep waiting\n # for all lower-level masters to respond with their minion lists\n\n # let start the timeouts for all remaining minions\n\n for id_ in minions - found:\n # if we have a new minion in the list, make sure it has a timeout\n if id_ not in minion_timeouts:\n minion_timeouts[id_] = time.time() + timeout\n\n # if the jinfo has timed out and some minions are still running the job\n # re-do the ping\n if time.time() > timeout_at and minions_running:\n # since this is a new ping, no one has responded yet\n jinfo = self.gather_job_info(jid, list(minions - found), 'list', **kwargs)\n minions_running = False\n # if we weren't assigned any jid that means the master thinks\n # we have nothing to send\n if 'jid' not in jinfo:\n jinfo_iter = []\n else:\n jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid']))\n timeout_at = time.time() + gather_job_timeout\n # if you are a syndic, wait a little longer\n if self.opts['order_masters']:\n timeout_at += self.opts.get('syndic_wait', 1)\n\n # check for minions that are running the job still\n for raw in jinfo_iter:\n # if there are no more events, lets stop waiting for the jinfo\n if raw is None:\n break\n try:\n if raw['data']['retcode'] > 0:\n log.error('saltutil returning errors on minion %s', raw['data']['id'])\n minions.remove(raw['data']['id'])\n break\n except KeyError as exc:\n # This is a safe pass. We're just using the try/except to\n # avoid having to deep-check for keys.\n missing_key = exc.__str__().strip('\\'\"')\n if missing_key == 'retcode':\n log.debug('retcode missing from client return')\n else:\n log.debug(\n 'Passing on saltutil error. Key \\'%s\\' missing '\n 'from client return. 
This may be an error in '\n 'the client.', missing_key\n )\n # Keep track of the jid events to unsubscribe from later\n open_jids.add(jinfo['jid'])\n\n # TODO: move to a library??\n if 'minions' in raw.get('data', {}):\n minions.update(raw['data']['minions'])\n continue\n if 'syndic' in raw.get('data', {}):\n minions.update(raw['syndic'])\n continue\n if 'return' not in raw.get('data', {}):\n continue\n\n # if the job isn't running there anymore... don't count\n if raw['data']['return'] == {}:\n continue\n\n # if the minion throws an exception containing the word \"return\"\n # the master will try to handle the string as a dict in the next\n # step. Check if we have a string, log the issue and continue.\n if isinstance(raw['data']['return'], six.string_types):\n log.error(\"unexpected return from minion: %s\", raw)\n continue\n\n if 'return' in raw['data']['return'] and \\\n raw['data']['return']['return'] == {}:\n continue\n\n # if we didn't originally target the minion, lets add it to the list\n if raw['data']['id'] not in minions:\n minions.add(raw['data']['id'])\n # update this minion's timeout, as long as the job is still running\n minion_timeouts[raw['data']['id']] = time.time() + timeout\n # a minion returned, so we know its running somewhere\n minions_running = True\n\n # if we have hit gather_job_timeout (after firing the job) AND\n # if we have hit all minion timeouts, lets call it\n now = time.time()\n # if we have finished waiting, and no minions are running the job\n # then we need to see if each minion has timedout\n done = (now > timeout_at) and not minions_running\n if done:\n # if all minions have timeod out\n for id_ in minions - found:\n if now < minion_timeouts[id_]:\n done = False\n break\n if done:\n break\n\n # don't spin\n if block:\n time.sleep(0.01)\n else:\n yield\n\n # If there are any remaining open events, clean them up.\n if open_jids:\n for jid in open_jids:\n self.event.unsubscribe(jid)\n\n if expect_minions:\n for minion in 
list((minions - found)):\n yield {minion: {'failed': True}}\n\n # Filter out any minions marked as missing for which we received\n # returns (prevents false events sent due to higher-level masters not\n # knowing about lower-level minions).\n missing -= found\n\n # Report on missing minions\n if missing:\n for minion in missing:\n yield {minion: {'failed': True}}\n", "def _clean_up_subscriptions(self, job_id):\n if self.opts.get('order_masters'):\n self.event.unsubscribe('syndic/.*/{0}'.format(job_id), 'regex')\n self.event.unsubscribe('salt/job/{0}'.format(job_id))\n" ]
class LocalClient(object):
    '''
    The interface used by the :command:`salt` CLI tool on the Salt Master

    ``LocalClient`` is used to send a command to Salt minions to execute
    :ref:`execution modules <all-salt.modules>` and return the results to the
    Salt Master.

    Importing and using ``LocalClient`` must be done on the same machine as the
    Salt Master and it must be done using the same user that the Salt Master is
    running as. (Unless :conf_master:`external_auth` is configured and
    authentication credentials are included in the execution).

    .. note::
        The LocalClient uses a Tornado IOLoop, this can create issues when
        using the LocalClient inside an existing IOLoop. If creating the
        LocalClient in partnership with another IOLoop either create the
        IOLoop before creating the LocalClient, or when creating the IOLoop
        use ioloop.current() which will return the ioloop created by
        LocalClient.

    .. code-block:: python

        import salt.client

        local = salt.client.LocalClient()
        local.cmd('*', 'test.fib', [10])
    '''
    def __init__(self,
                 c_path=os.path.join(syspaths.CONFIG_DIR, 'master'),
                 mopts=None, skip_perm_errors=False,
                 io_loop=None, keep_loop=False, auto_reconnect=False):
        '''
        :param IOLoop io_loop: io_loop used for events.
                               Pass in an io_loop if you want asynchronous
                               operation for obtaining events. Eg use of
                               set_event_handler() API. Otherwise,
                               operation will be synchronous.
        '''
        if mopts:
            self.opts = mopts
        else:
            if os.path.isdir(c_path):
                log.warning(
                    '%s expects a file path not a directory path(%s) to '
                    'its \'c_path\' keyword argument',
                    self.__class__.__name__, c_path
                )
            self.opts = salt.config.client_config(c_path)
        self.serial = salt.payload.Serial(self.opts)
        self.salt_user = salt.utils.user.get_specific_user()
        self.skip_perm_errors = skip_perm_errors
        self.key = self.__read_master_key()
        self.auto_reconnect = auto_reconnect
        # Event bus handle; listen=False because subscriptions are installed
        # lazily, per-job, by _check_pub_data()/gather_job_info().
        self.event = salt.utils.event.get_event(
                'master',
                self.opts['sock_dir'],
                self.opts['transport'],
                opts=self.opts,
                listen=False,
                io_loop=io_loop,
                keep_loop=keep_loop)
        self.utils = salt.loader.utils(self.opts)
        self.functions = salt.loader.minion_mods(self.opts, utils=self.utils)
        self.returners = salt.loader.returners(self.opts, self.functions)

    def __read_master_key(self):
        '''
        Read in the rotating master authentication key
        '''
        key_user = self.salt_user
        if key_user == 'root':
            if self.opts.get('user', 'root') != 'root':
                key_user = self.opts.get('user', 'root')
        if key_user.startswith('sudo_'):
            key_user = self.opts.get('user', 'root')
        if salt.utils.platform.is_windows():
            # The username may contain '\' if it is in Windows
            # 'DOMAIN\username' format. Fix this for the keyfile path.
            key_user = key_user.replace('\\', '_')
        keyfile = os.path.join(self.opts['cachedir'],
                               '.{0}_key'.format(key_user))
        try:
            # Make sure all key parent directories are accessible
            salt.utils.verify.check_path_traversal(self.opts['cachedir'],
                                                   key_user,
                                                   self.skip_perm_errors)
            with salt.utils.files.fopen(keyfile, 'r') as key:
                return salt.utils.stringutils.to_unicode(key.read())
        except (OSError, IOError, SaltClientError):
            # Fall back to eauth
            return ''

    def _convert_range_to_list(self, tgt):
        '''
        convert a seco.range range into a list target
        '''
        range_ = seco.range.Range(self.opts['range_server'])
        try:
            return range_.expand(tgt)
        except seco.range.RangeException as err:
            print('Range server exception: {0}'.format(err))
            return []

    def _get_timeout(self, timeout):
        '''
        Return the timeout to use

        Coerces the given value to an int; falls back to the configured
        ``timeout`` option when the value is None or unparseable.
        '''
        if timeout is None:
            return self.opts['timeout']
        if isinstance(timeout, int):
            return timeout
        if isinstance(timeout, six.string_types):
            try:
                return int(timeout)
            except ValueError:
                return self.opts['timeout']
        # Looks like the timeout is invalid, use config
        return self.opts['timeout']

    def gather_job_info(self, jid, tgt, tgt_type, listen=True, **kwargs):
        '''
        Return the information about a given job

        Publishes a ``saltutil.find_job`` for *jid* to the target and, when a
        jid was assigned, subscribes to its events.
        '''
        log.debug('Checking whether jid %s is still running', jid)
        timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout']))

        pub_data = self.run_job(tgt,
                                'saltutil.find_job',
                                arg=[jid],
                                tgt_type=tgt_type,
                                timeout=timeout,
                                listen=listen,
                                **kwargs
                                )

        if 'jid' in pub_data:
            self.event.subscribe(pub_data['jid'])

        return pub_data

    def _check_pub_data(self, pub_data, listen=True):
        '''
        Common checks on the pub_data data structure returned from running pub
        '''
        if pub_data == '':
            # Failed to authenticate, this could be a bunch of things
            raise EauthAuthenticationError(
                'Failed to authenticate! This is most likely because this '
                'user is not permitted to execute commands, but there is a '
                'small possibility that a disk error occurred (check '
                'disk/inode usage).'
            )

        # Failed to connect to the master and send the pub
        if 'error' in pub_data:
            print(pub_data['error'])
            log.debug('_check_pub_data() error: %s', pub_data['error'])
            return {}
        elif 'jid' not in pub_data:
            return {}
        if pub_data['jid'] == '0':
            print('Failed to connect to the Master, '
                  'is the Salt Master running?')
            return {}

        # If we order masters (via a syndic), don't short circuit if no minions
        # are found
        if not self.opts.get('order_masters'):
            # Check for no minions
            if not pub_data['minions']:
                print('No minions matched the target. '
                      'No command was sent, no jid was assigned.')
                return {}

        # don't install event subscription listeners when the request is asynchronous
        # and doesn't care. this is important as it will create event leaks otherwise
        if not listen:
            return pub_data

        if self.opts.get('order_masters'):
            self.event.subscribe('syndic/.*/{0}'.format(pub_data['jid']), 'regex')

        self.event.subscribe('salt/job/{0}'.format(pub_data['jid']))

        return pub_data

    def run_job(
            self,
            tgt,
            fun,
            arg=(),
            tgt_type='glob',
            ret='',
            timeout=None,
            jid='',
            kwarg=None,
            listen=False,
            **kwargs):
        '''
        Asynchronously send a command to connected minions

        Prep the job directory and publish a command to any targeted minions.

        :return: A dictionary of (validated) ``pub_data`` or an empty
            dictionary on failure. The ``pub_data`` contains the job ID and a
            list of all minions that are expected to return data.

        .. code-block:: python

            >>> local.run_job('*', 'test.sleep', [300])
            {'jid': '20131219215650131543', 'minions': ['jerry']}
        '''
        arg = salt.utils.args.parse_input(arg, kwargs=kwarg)

        try:
            pub_data = self.pub(
                tgt,
                fun,
                arg,
                tgt_type,
                ret,
                jid=jid,
                timeout=self._get_timeout(timeout),
                listen=listen,
                **kwargs)
        except SaltClientError:
            # Re-raise error with specific message
            raise SaltClientError(
                'The salt master could not be contacted. Is master running?'
            )
        except AuthenticationError as err:
            raise AuthenticationError(err)
        except AuthorizationError as err:
            raise AuthorizationError(err)
        except Exception as general_exception:
            # Convert to generic client error and pass along message
            raise SaltClientError(general_exception)

        return self._check_pub_data(pub_data, listen=listen)

    def gather_minions(self, tgt, expr_form):
        # Resolve a target expression to the list of matching minion ids.
        _res = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form)
        return _res['minions']

    @tornado.gen.coroutine
    def run_job_async(
            self,
            tgt,
            fun,
            arg=(),
            tgt_type='glob',
            ret='',
            timeout=None,
            jid='',
            kwarg=None,
            listen=True,
            io_loop=None,
            **kwargs):
        '''
        Asynchronously send a command to connected minions

        Prep the job directory and publish a command to any targeted minions.

        :return: A dictionary of (validated) ``pub_data`` or an empty
            dictionary on failure. The ``pub_data`` contains the job ID and a
            list of all minions that are expected to return data.

        .. code-block:: python

            >>> local.run_job_async('*', 'test.sleep', [300])
            {'jid': '20131219215650131543', 'minions': ['jerry']}
        '''
        arg = salt.utils.args.parse_input(arg, kwargs=kwarg)

        try:
            pub_data = yield self.pub_async(
                  tgt,
                  fun,
                  arg,
                  tgt_type,
                  ret,
                  jid=jid,
                  timeout=self._get_timeout(timeout),
                  io_loop=io_loop,
                  listen=listen,
                  **kwargs)
        except SaltClientError:
            # Re-raise error with specific message
            raise SaltClientError(
                'The salt master could not be contacted. Is master running?'
            )
        except AuthenticationError as err:
            raise AuthenticationError(err)
        except AuthorizationError as err:
            raise AuthorizationError(err)
        except Exception as general_exception:
            # Convert to generic client error and pass along message
            raise SaltClientError(general_exception)

        raise tornado.gen.Return(self._check_pub_data(pub_data, listen=listen))

    def cmd_async(
            self,
            tgt,
            fun,
            arg=(),
            tgt_type='glob',
            ret='',
            jid='',
            kwarg=None,
            **kwargs):
        '''
        Asynchronously send a command to connected minions

        The function signature is the same as :py:meth:`cmd` with the
        following exceptions.

        :returns: A job ID or 0 on failure.

        .. code-block:: python

            >>> local.cmd_async('*', 'test.sleep', [300])
            '20131219215921857715'
        '''
        # listen=False: fire-and-forget, no event subscription is installed.
        pub_data = self.run_job(tgt,
                                fun,
                                arg,
                                tgt_type,
                                ret,
                                jid=jid,
                                kwarg=kwarg,
                                listen=False,
                                **kwargs)
        try:
            return pub_data['jid']
        except KeyError:
            return 0

    def cmd_subset(
            self,
            tgt,
            fun,
            arg=(),
            tgt_type='glob',
            ret='',
            kwarg=None,
            sub=3,
            cli=False,
            progress=False,
            full_return=False,
            **kwargs):
        '''
        Execute a command on a random subset of the targeted systems

        The function signature is the same as :py:meth:`cmd` with the
        following exceptions.

        :param sub: The number of systems to execute on
        :param cli: When this is set to True, a generator is returned,
                    otherwise a dictionary of the minion returns is returned

        ..
code-block:: python

            >>> SLC.cmd_subset('*', 'test.ping', sub=1)
            {'jerry': True}
        '''
        # Ask every matching minion which functions it has, then pick the
        # first `sub` minions (after shuffling) that can run `fun`.
        minion_ret = self.cmd(tgt, 'sys.list_functions', tgt_type=tgt_type, **kwargs)
        minions = list(minion_ret)
        random.shuffle(minions)
        f_tgt = []
        for minion in minions:
            if fun in minion_ret[minion]:
                f_tgt.append(minion)
            if len(f_tgt) >= sub:
                break
        func = self.cmd
        if cli:
            func = self.cmd_cli
        return func(
                f_tgt,
                fun,
                arg,
                tgt_type='list',
                ret=ret,
                kwarg=kwarg,
                progress=progress,
                full_return=full_return,
                **kwargs)

    def cmd_batch(
            self,
            tgt,
            fun,
            arg=(),
            tgt_type='glob',
            ret='',
            kwarg=None,
            batch='10%',
            **kwargs):
        '''
        Iteratively execute a command on subsets of minions at a time

        The function signature is the same as :py:meth:`cmd` with the
        following exceptions.

        :param batch: The batch identifier of systems to execute on

        :returns: A generator of minion returns

        .. code-block:: python

            >>> returns = local.cmd_batch('*', 'state.highstate', batch='10%')
            >>> for ret in returns:
            ...     print(ret)
            {'jerry': {...}}
            {'dave': {...}}
            {'stewart': {...}}
        '''
        # Late import - not used anywhere else in this file
        import salt.cli.batch
        opts = salt.cli.batch.batch_get_opts(
            tgt,
            fun,
            batch,
            self.opts,
            arg=arg,
            tgt_type=tgt_type,
            ret=ret,
            kwarg=kwarg,
            **kwargs)

        eauth = salt.cli.batch.batch_get_eauth(kwargs)

        # NOTE(review): `opts` and `eauth` computed above are immediately
        # rebuilt by hand below — the helper results are discarded.
        arg = salt.utils.args.parse_input(arg, kwargs=kwarg)
        opts = {'tgt': tgt,
                'fun': fun,
                'arg': arg,
                'tgt_type': tgt_type,
                'ret': ret,
                'batch': batch,
                'failhard': kwargs.get('failhard', False),
                'raw': kwargs.get('raw', False)}

        if 'timeout' in kwargs:
            opts['timeout'] = kwargs['timeout']
        if 'gather_job_timeout' in kwargs:
            opts['gather_job_timeout'] = kwargs['gather_job_timeout']
        if 'batch_wait' in kwargs:
            opts['batch_wait'] = int(kwargs['batch_wait'])

        eauth = {}
        if 'eauth' in kwargs:
            eauth['eauth'] = kwargs.pop('eauth')
        if 'username' in kwargs:
            eauth['username'] = kwargs.pop('username')
        if 'password' in kwargs:
            eauth['password'] = kwargs.pop('password')
        if 'token' in kwargs:
            eauth['token'] = kwargs.pop('token')

        # Fill in any master options not explicitly set above
        for key, val in six.iteritems(self.opts):
            if key not in opts:
                opts[key] = val
        batch = salt.cli.batch.Batch(opts, eauth=eauth, quiet=True)
        for ret in batch.run():
            yield ret

    def cmd(self,
            tgt,
            fun,
            arg=(),
            timeout=None,
            tgt_type='glob',
            ret='',
            jid='',
            full_return=False,
            kwarg=None,
            **kwargs):
        '''
        Synchronously execute a command on targeted minions

        The cmd method will execute and wait for the timeout period for all
        minions to reply, then it will return all minion data at once.

        .. code-block:: python

            >>> import salt.client
            >>> local = salt.client.LocalClient()
            >>> local.cmd('*', 'cmd.run', ['whoami'])
            {'jerry': 'root'}

        With extra keyword arguments for the command function to be run:

        .. code-block:: python

            local.cmd('*', 'test.arg', ['arg1', 'arg2'], kwarg={'foo': 'bar'})

        Compound commands can be used for multiple executions in a single
        publish. Function names and function arguments are provided in
        separate lists but the index values must correlate and an empty list
        must be used if no arguments are required.

        .. code-block:: python

            >>> local.cmd('*', [
                    'grains.items',
                    'sys.doc',
                    'cmd.run',
                ],
                [
                    [],
                    [],
                    ['uptime'],
                ])

        :param tgt: Which minions to target for the execution. Default is shell
            glob. Modified by the ``tgt_type`` option.
        :type tgt: string or list

        :param fun: The module and function to call on the specified minions of
            the form ``module.function``. For example ``test.ping`` or
            ``grains.items``.

            Compound commands
                Multiple functions may be called in a single publish by
                passing a list of commands. This can dramatically lower
                overhead and speed up the application communicating with Salt.

                This requires that the ``arg`` param is a list of lists. The
                ``fun`` list and the ``arg`` list must correlate by index
                meaning a function that does not take arguments must still have
                a corresponding empty list at the expected index.
        :type fun: string or list of strings

        :param arg: A list of arguments to pass to the remote function.
            If the function takes no arguments ``arg`` may be omitted except
            when executing a compound command.
        :type arg: list or list-of-lists

        :param timeout: Seconds to wait after the last minion returns but
            before all minions return.

        :param tgt_type: The type of ``tgt``. Allowed values:

            * ``glob`` - Bash glob completion - Default
            * ``pcre`` - Perl style regular expression
            * ``list`` - Python list of hosts
            * ``grain`` - Match based on a grain comparison
            * ``grain_pcre`` - Grain comparison with a regex
            * ``pillar`` - Pillar data comparison
            * ``pillar_pcre`` - Pillar data comparison with a regex
            * ``nodegroup`` - Match on nodegroup
            * ``range`` - Use a Range server for matching
            * ``compound`` - Pass a compound match string
            * ``ipcidr`` - Match based on Subnet (CIDR notation) or IPv4
              address.

            .. versionchanged:: 2017.7.0
                Renamed from ``expr_form`` to ``tgt_type``

        :param ret: The returner to use. The value passed can be single
            returner, or a comma delimited list of returners to call in order
            on the minions

        :param kwarg: A dictionary with keyword arguments for the function.

        :param full_return: Output the job return only (default) or the full
            return including exit code and other job metadata.

        :param kwargs: Optional keyword arguments.
            Authentication credentials may be passed when using
            :conf_master:`external_auth`.

            For example: ``local.cmd('*', 'test.ping', username='saltdev',
            password='saltdev', eauth='pam')``.
            Or: ``local.cmd('*', 'test.ping',
            token='5871821ea51754fdcea8153c1c745433')``

        :returns: A dictionary with the result of the execution, keyed by
            minion ID. A compound command will return a sub-dictionary keyed by
            function name.
        '''
        was_listening = self.event.cpub

        try:
            pub_data = self.run_job(tgt,
                                    fun,
                                    arg,
                                    tgt_type,
                                    ret,
                                    timeout,
                                    jid,
                                    kwarg=kwarg,
                                    listen=True,
                                    **kwargs)

            if not pub_data:
                return pub_data

            ret = {}
            for fn_ret in self.get_cli_event_returns(
                    pub_data['jid'],
                    pub_data['minions'],
                    self._get_timeout(timeout),
                    tgt,
                    tgt_type,
                    **kwargs):

                if fn_ret:
                    for mid, data in six.iteritems(fn_ret):
                        ret[mid] = (data if full_return
                                    else data.get('ret', {}))

            # Any targeted minion that never produced a return is marked False
            for failed in list(set(pub_data['minions']) - set(ret)):
                ret[failed] = False
            return ret
        finally:
            # Only tear down the event publisher if we opened it ourselves
            if not was_listening:
                self.event.close_pub()

    def cmd_cli(
            self,
            tgt,
            fun,
            arg=(),
            timeout=None,
            tgt_type='glob',
            ret='',
            verbose=False,
            kwarg=None,
            progress=False,
            **kwargs):
        '''
        Used by the :command:`salt` CLI. This method returns minion returns as
        they come back and attempts to block until all minions return.

        The function signature is the same as :py:meth:`cmd` with the
        following exceptions.

        :param verbose: Print extra information about the running command
        :returns: A generator
        '''
        was_listening = self.event.cpub

        if fun.startswith('state.'):
            # Build the CLI target flag string so the Ctrl-c help text can
            # show a copy/paste-able `state.soft_kill` command.
            ref = {'compound': '-C',
                   'glob': '',
                   'grain': '-G',
                   'grain_pcre': '-P',
                   'ipcidr': '-S',
                   'list': '-L',
                   'nodegroup': '-N',
                   'pcre': '-E',
                   'pillar': '-I',
                   'pillar_pcre': '-J'}
            if HAS_RANGE:
                ref['range'] = '-R'
            if ref[tgt_type].startswith('-'):
                self.target_data = "{0} '{1}'".format(
                    ref[tgt_type],
                    ','.join(tgt) if isinstance(tgt, list) else tgt)
            else:
                self.target_data = ','.join(tgt) if isinstance(tgt, list) else tgt
        else:
            self.target_data = ''

        try:
            self.pub_data = self.run_job(
                tgt,
                fun,
                arg,
                tgt_type,
                ret,
                timeout,
                kwarg=kwarg,
                listen=True,
                **kwargs)
            if not self.pub_data:
                yield self.pub_data
            else:
                try:
                    for fn_ret in self.get_cli_event_returns(
                            self.pub_data['jid'],
                            self.pub_data['minions'],
                            self._get_timeout(timeout),
                            tgt,
                            tgt_type,
                            verbose,
                            progress,
                            **kwargs):

                        if not fn_ret:
                            continue

                        yield fn_ret
                except KeyboardInterrupt:
                    exit_msg = (
                        '\nExiting gracefully on Ctrl-c'
                        '\n'
                        'This job\'s jid is: {0}\n'
                        'The minions may not have all finished running and any '
                        'remaining minions will return upon completion.\n\n'
                        'To look up the return data for this job later, run the '
                        'following command:\n'
                        'salt-run jobs.lookup_jid {0}'.format(self.pub_data['jid']))
                    if self.target_data:
                        exit_msg += (
                            '\n\n'
                            'To set up the state run to safely exit, run the following command:\n'
                            'salt {0} state.soft_kill {1}'.format(self.target_data, self.pub_data['jid']))
                    raise SystemExit(exit_msg)
        finally:
            if not was_listening:
                self.event.close_pub()

    def cmd_iter_no_block(
            self,
            tgt,
            fun,
            arg=(),
            timeout=None,
            tgt_type='glob',
            ret='',
            kwarg=None,
            show_jid=False,
            verbose=False,
            **kwargs):
        '''
        Yields the individual minion returns as they come in, or None
        when no returns are available.

        The function signature is the same as :py:meth:`cmd` with the
        following exceptions.

        :returns: A generator yielding the individual minion returns, or None
            when no returns are available. This allows for actions to be
            injected in between minion returns.

        .. code-block:: python

            >>> ret = local.cmd_iter_no_block('*', 'test.ping')
            >>> for i in ret:
            ...
print(i)
            None
            {'jerry': {'ret': True}}
            {'dave': {'ret': True}}
            None
            {'stewart': {'ret': True}}
        '''
        was_listening = self.event.cpub

        try:
            pub_data = self.run_job(
                tgt,
                fun,
                arg,
                tgt_type,
                ret,
                timeout,
                kwarg=kwarg,
                listen=True,
                **kwargs)

            if not pub_data:
                yield pub_data
            else:
                for fn_ret in self.get_iter_returns(pub_data['jid'],
                                                    pub_data['minions'],
                                                    timeout=timeout,
                                                    tgt=tgt,
                                                    tgt_type=tgt_type,
                                                    block=False,
                                                    **kwargs):
                    if fn_ret and any([show_jid, verbose]):
                        for minion in fn_ret:
                            fn_ret[minion]['jid'] = pub_data['jid']
                    yield fn_ret

                self._clean_up_subscriptions(pub_data['jid'])
        finally:
            if not was_listening:
                self.event.close_pub()

    def cmd_full_return(
            self,
            tgt,
            fun,
            arg=(),
            timeout=None,
            tgt_type='glob',
            ret='',
            verbose=False,
            kwarg=None,
            **kwargs):
        '''
        Execute a salt command and return
        '''
        was_listening = self.event.cpub

        try:
            pub_data = self.run_job(
                tgt,
                fun,
                arg,
                tgt_type,
                ret,
                timeout,
                kwarg=kwarg,
                listen=True,
                **kwargs)

            if not pub_data:
                return pub_data

            return (self.get_cli_static_event_returns(pub_data['jid'],
                                                      pub_data['minions'],
                                                      timeout,
                                                      tgt,
                                                      tgt_type,
                                                      verbose))
        finally:
            if not was_listening:
                self.event.close_pub()

    def get_cli_returns(
            self,
            jid,
            minions,
            timeout=None,
            tgt='*',
            tgt_type='glob',
            verbose=False,
            show_jid=False,
            **kwargs):
        '''
        Starts a watcher looking at the return data for a specified JID

        :returns: all of the information for the JID
        '''
        if verbose:
            msg = 'Executing job with jid {0}'.format(jid)
            print(msg)
            print('-' * len(msg) + '\n')
        elif show_jid:
            print('jid: {0}'.format(jid))
        if timeout is None:
            timeout = self.opts['timeout']
        fret = {}
        # make sure the minions is a set (since we do set operations on it)
        minions = set(minions)
        found = set()
        # start this before the cache lookup-- in case new stuff comes in
        event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout)
        # get the info from the cache
        ret = self.get_cache_returns(jid)
        if ret != {}:
            found.update(set(ret))
            yield ret
        # if you have all the returns, stop
        # NOTE(review): raising StopIteration inside a generator becomes a
        # RuntimeError on Python 3.7+ (PEP 479) — a plain `return` would be
        # safer here.
        if len(found.intersection(minions)) >= len(minions):
            raise StopIteration()
        # otherwise, get them from the event system
        for event in event_iter:
            if event != {}:
                found.update(set(event))
                yield event
            if len(found.intersection(minions)) >= len(minions):
                self._clean_up_subscriptions(jid)
                raise StopIteration()

    # TODO: tests!!
    def get_returns_no_block(
            self,
            tag,
            match_type=None):
        '''
        Raw function to just return events of jid excluding timeout logic

        Yield either the raw event data or None

        Pass a list of additional regular expressions as `tags_regex` to search
        the event bus for non-return data, such as minion lists returned from
        syndics.
        '''

        while True:
            raw = self.event.get_event(wait=0.01, tag=tag, match_type=match_type, full=True, no_block=True,
                                       auto_reconnect=self.auto_reconnect)
            yield raw

    def get_iter_returns(
            self,
            jid,
            minions,
            timeout=None,
            tgt='*',
            tgt_type='glob',
            expect_minions=False,
            block=True,
            **kwargs):
        '''
        Watch the event system and return job data as it comes in

        :returns: all of the information for the JID
        '''
        if not isinstance(minions, set):
            if isinstance(minions, six.string_types):
                minions = set([minions])
            elif isinstance(minions, (list, tuple)):
                minions = set(list(minions))

        if timeout is None:
            timeout = self.opts['timeout']
        gather_job_timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout']))
        start = int(time.time())

        # timeouts per minion, id_ -> timeout time
        minion_timeouts = {}

        found = set()
        missing = set()
        # Check to see if the jid is real, if not return the empty dict
        try:
            if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}:
                log.warning('jid does not exist')
                yield {}
                # stop the iteration, since the jid is invalid
                raise StopIteration()
        except Exception as exc:
            log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG)
        # Wait for the hosts to check in
        last_time = False
        # iterator for this job's return
        if self.opts['order_masters']:
            # If we are a MoM, we need to gather expected minions from downstreams masters.
            ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex')
        else:
            ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid))
        # iterator for the info of this job
        jinfo_iter = []
        # open event jids that need to be un-subscribed from later
        open_jids = set()
        timeout_at = time.time() + timeout
        gather_syndic_wait = time.time() + self.opts['syndic_wait']
        # are there still minions running the job out there
        # start as True so that we ping at least once
        minions_running = True
        log.debug(
            'get_iter_returns for jid %s sent to %s will timeout at %s',
            jid, minions, datetime.fromtimestamp(timeout_at).time()
        )
        while True:
            # Process events until timeout is reached or all minions have returned
            for raw in ret_iter:
                # if we got None, then there were no events
                if raw is None:
                    break
                if 'minions' in raw.get('data', {}):
                    minions.update(raw['data']['minions'])
                    if 'missing' in raw.get('data', {}):
                        missing.update(raw['data']['missing'])
                    continue
                if 'return' not in raw['data']:
                    continue
                if kwargs.get('raw', False):
                    found.add(raw['data']['id'])
                    yield raw
                else:
                    found.add(raw['data']['id'])
                    ret = {raw['data']['id']: {'ret': raw['data']['return']}}
                    if 'out' in raw['data']:
                        ret[raw['data']['id']]['out'] = raw['data']['out']
                    if 'retcode' in raw['data']:
                        ret[raw['data']['id']]['retcode'] = raw['data']['retcode']
                    if 'jid' in raw['data']:
                        ret[raw['data']['id']]['jid'] = raw['data']['jid']
                    if kwargs.get('_cmd_meta', False):
                        ret[raw['data']['id']].update(raw['data'])
                    log.debug('jid %s return from %s', jid, raw['data']['id'])
                    yield ret

            # if we have all of the returns (and we aren't a syndic), no need for anything fancy
            if len(found.intersection(minions)) >= len(minions) and not self.opts['order_masters']:
                # All minions have returned, break out of the loop
                log.debug('jid %s found all minions %s', jid, found)
                break
            elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']:
                if len(found) >= len(minions) and len(minions) > 0 and time.time() > gather_syndic_wait:
                    # There were some minions to find and we found them
                    # However, this does not imply that *all* masters have yet responded with expected minion lists.
                    # Therefore, continue to wait up to the syndic_wait period (calculated in gather_syndic_wait) to see
                    # if additional lower-level masters deliver their lists of expected
                    # minions.
                    break
            # If we get here we may not have gathered the minion list yet. Keep waiting
            # for all lower-level masters to respond with their minion lists

            # let start the timeouts for all remaining minions
            for id_ in minions - found:
                # if we have a new minion in the list, make sure it has a timeout
                if id_ not in minion_timeouts:
                    minion_timeouts[id_] = time.time() + timeout

            # if the jinfo has timed out and some minions are still running the job
            # re-do the ping
            if time.time() > timeout_at and minions_running:
                # since this is a new ping, no one has responded yet
                jinfo = self.gather_job_info(jid, list(minions - found), 'list', **kwargs)
                minions_running = False
                # if we weren't assigned any jid that means the master thinks
                # we have nothing to send
                if 'jid' not in jinfo:
                    jinfo_iter = []
                else:
                    jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid']))
                timeout_at = time.time() + gather_job_timeout
                # if you are a syndic, wait a little longer
                if self.opts['order_masters']:
                    timeout_at += self.opts.get('syndic_wait', 1)

            # check for minions that are running the job still
            for raw in jinfo_iter:
                # if there are no more events, lets stop waiting for the jinfo
                if raw is None:
                    break
                try:
                    if raw['data']['retcode'] > 0:
                        log.error('saltutil returning errors on minion %s', raw['data']['id'])
                        minions.remove(raw['data']['id'])
                        break
                except KeyError as exc:
                    # This is a safe pass. We're just using the try/except to
                    # avoid having to deep-check for keys.
                    missing_key = exc.__str__().strip('\'"')
                    if missing_key == 'retcode':
                        log.debug('retcode missing from client return')
                    else:
                        log.debug(
                            'Passing on saltutil error. Key \'%s\' missing '
                            'from client return. This may be an error in '
                            'the client.', missing_key
                        )
                # Keep track of the jid events to unsubscribe from later
                open_jids.add(jinfo['jid'])

                # TODO: move to a library??
                if 'minions' in raw.get('data', {}):
                    minions.update(raw['data']['minions'])
                    continue
                if 'syndic' in raw.get('data', {}):
                    minions.update(raw['syndic'])
                    continue
                if 'return' not in raw.get('data', {}):
                    continue

                # if the job isn't running there anymore... don't count
                if raw['data']['return'] == {}:
                    continue

                # if the minion throws an exception containing the word "return"
                # the master will try to handle the string as a dict in the next
                # step. Check if we have a string, log the issue and continue.
                if isinstance(raw['data']['return'], six.string_types):
                    log.error("unexpected return from minion: %s", raw)
                    continue

                if 'return' in raw['data']['return'] and \
                        raw['data']['return']['return'] == {}:
                    continue

                # if we didn't originally target the minion, lets add it to the list
                if raw['data']['id'] not in minions:
                    minions.add(raw['data']['id'])
                # update this minion's timeout, as long as the job is still running
                minion_timeouts[raw['data']['id']] = time.time() + timeout
                # a minion returned, so we know its running somewhere
                minions_running = True

            # if we have hit gather_job_timeout (after firing the job) AND
            # if we have hit all minion timeouts, lets call it
            now = time.time()
            # if we have finished waiting, and no minions are running the job
            # then we need to see if each minion has timedout
            done = (now > timeout_at) and not minions_running
            if done:
                # if all minions have timeod out
                for id_ in minions - found:
                    if now < minion_timeouts[id_]:
                        done = False
                        break
            if done:
                break

            # don't spin
            if block:
                time.sleep(0.01)
            else:
                yield

        # If there are any remaining open events, clean them up.
        if open_jids:
            # NOTE(review): this loop rebinds the `jid` parameter; harmless
            # here only because `jid` is not used again afterwards.
            for jid in open_jids:
                self.event.unsubscribe(jid)

        if expect_minions:
            for minion in list((minions - found)):
                yield {minion: {'failed': True}}

        # Filter out any minions marked as missing for which we received
        # returns (prevents false events sent due to higher-level masters not
        # knowing about lower-level minions).
        missing -= found

        # Report on missing minions
        if missing:
            for minion in missing:
                yield {minion: {'failed': True}}

    def get_returns(
            self,
            jid,
            minions,
            timeout=None):
        '''
        Get the returns for the command line interface via the event system
        '''
        minions = set(minions)
        if timeout is None:
            timeout = self.opts['timeout']
        start = int(time.time())
        timeout_at = start + timeout
        log.debug(
            'get_returns for jid %s sent to %s will timeout at %s',
            jid, minions, datetime.fromtimestamp(timeout_at).time()
        )

        found = set()
        ret = {}
        # Check to see if the jid is real, if not return the empty dict
        try:
            if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}:
                log.warning('jid does not exist')
                return ret
        except Exception as exc:
            raise SaltClientError('Master job cache returner [{0}] failed to verify jid. '
                                  'Exception details: {1}'.format(self.opts['master_job_cache'], exc))

        # Wait for the hosts to check in
        while True:
            time_left = timeout_at - int(time.time())
            wait = max(1, time_left)
            raw = self.event.get_event(wait, jid, auto_reconnect=self.auto_reconnect)
            if raw is not None and 'return' in raw:
                found.add(raw['id'])
                ret[raw['id']] = raw['return']
                if len(found.intersection(minions)) >= len(minions):
                    # All minions have returned, break out of the loop
                    log.debug('jid %s found all minions', jid)
                    break
                continue
            # Then event system timeout was reached and nothing was returned
            if len(found.intersection(minions)) >= len(minions):
                # All minions have returned, break out of the loop
                log.debug('jid %s found all minions', jid)
                break
            if int(time.time()) > timeout_at:
                log.info(
                    'jid %s minions %s did not return in time',
                    jid, (minions - found)
                )
                break
            time.sleep(0.01)
        return ret

    def get_full_returns(self, jid, minions, timeout=None):
        '''
        This method starts off a watcher looking at the return data for
        a specified jid, it returns all of the information for the jid
        '''
        # TODO: change this from ret to return... or the other way.
        #       Its inconsistent, we should pick one
        ret = {}
        # create the iterator-- since we want to get anyone in the middle
        event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout)

        try:
            data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid)
        except Exception as exc:
            raise SaltClientError('Returner {0} could not fetch jid data. '
                                  'Exception details: {1}'.format(
                                      self.opts['master_job_cache'],
                                      exc))
        for minion in data:
            m_data = {}
            # NOTE(review): both branches below are identical — the `if` on
            # 'return' is redundant as written.
            if 'return' in data[minion]:
                m_data['ret'] = data[minion].get('return')
            else:
                m_data['ret'] = data[minion].get('return')
            if 'out' in data[minion]:
                m_data['out'] = data[minion]['out']
            if minion in ret:
                ret[minion].update(m_data)
            else:
                ret[minion] = m_data

        # if we have all the minion returns, lets just return
        if len(set(ret).intersection(minions)) >= len(minions):
            return ret

        # otherwise lets use the listener we created above to get the rest
        for event_ret in event_iter:
            # if nothing in the event_ret, skip
            if event_ret == {}:
                time.sleep(0.02)
                continue
            for minion, m_data in six.iteritems(event_ret):
                if minion in ret:
                    ret[minion].update(m_data)
                else:
                    ret[minion] = m_data

            # are we done yet?
            if len(set(ret).intersection(minions)) >= len(minions):
                return ret

        # otherwise we hit the timeout, return what we have
        return ret

    def get_cache_returns(self, jid):
        '''
        Execute a single pass to gather the contents of the job cache
        '''
        ret = {}

        try:
            data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid)
        except Exception as exc:
            raise SaltClientError('Could not examine master job cache. '
                                  'Error occurred in {0} returner. '
                                  'Exception details: {1}'.format(self.opts['master_job_cache'],
                                                                  exc))
        for minion in data:
            m_data = {}
            # NOTE(review): both branches below are identical — same pattern
            # as in get_full_returns().
            if 'return' in data[minion]:
                m_data['ret'] = data[minion].get('return')
            else:
                m_data['ret'] = data[minion].get('return')
            if 'out' in data[minion]:
                m_data['out'] = data[minion]['out']
            if minion in ret:
                ret[minion].update(m_data)
            else:
                ret[minion] = m_data

        return ret

    def get_cli_static_event_returns(
            self,
            jid,
            minions,
            timeout=None,
            tgt='*',
            tgt_type='glob',
            verbose=False,
            show_timeout=False,
            show_jid=False):
        '''
        Get the returns for the command line interface via the event system
        '''
        log.trace('entered - function get_cli_static_event_returns()')
        minions = set(minions)
        if verbose:
            msg = 'Executing job with jid {0}'.format(jid)
            print(msg)
            print('-' * len(msg) + '\n')
        elif show_jid:
            print('jid: {0}'.format(jid))

        if timeout is None:
            timeout = self.opts['timeout']

        start = int(time.time())
        timeout_at = start + timeout
        found = set()
        ret = {}
        # Check to see if the jid is real, if not return the empty dict
        try:
            if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}:
                log.warning('jid does not exist')
                return ret
        except Exception as exc:
            raise SaltClientError('Load could not be retrieved from '
                                  'returner {0}. Exception details: {1}'.format(
                                      self.opts['master_job_cache'],
                                      exc))
        # Wait for the hosts to check in
        while True:
            # Process events until timeout is reached or all minions have returned
            time_left = timeout_at - int(time.time())
            # Wait 0 == forever, use a minimum of 1s
            wait = max(1, time_left)
            jid_tag = 'salt/job/{0}'.format(jid)
            raw = self.event.get_event(wait, jid_tag, auto_reconnect=self.auto_reconnect)
            if raw is not None and 'return' in raw:
                if 'minions' in raw.get('data', {}):
                    minions.update(raw['data']['minions'])
                    continue
                found.add(raw['id'])
                ret[raw['id']] = {'ret': raw['return']}
                ret[raw['id']]['success'] = raw.get('success', False)
                if 'out' in raw:
                    ret[raw['id']]['out'] = raw['out']
                if len(found.intersection(minions)) >= len(minions):
                    # All minions have returned, break out of the loop
                    break
                continue
            # Then event system timeout was reached and nothing was returned
            if len(found.intersection(minions)) >= len(minions):
                # All minions have returned, break out of the loop
                break
            if int(time.time()) > timeout_at:
                if verbose or show_timeout:
                    if self.opts.get('minion_data_cache', False) \
                            or tgt_type in ('glob', 'pcre', 'list'):
                        if len(found) < len(minions):
                            fail = sorted(list(minions.difference(found)))
                            for minion in fail:
                                ret[minion] = {
                                    'out': 'no_return',
                                    'ret': 'Minion did not return'
                                }
                break
            time.sleep(0.01)

        self._clean_up_subscriptions(jid)
        return ret

    def get_cli_event_returns(
            self,
            jid,
            minions,
            timeout=None,
            tgt='*',
            tgt_type='glob',
            verbose=False,
            progress=False,
            show_timeout=False,
            show_jid=False,
            **kwargs):
        '''
        Get the returns for the command line interface via the event system
        '''
        log.trace('func get_cli_event_returns()')

        if verbose:
            msg = 'Executing job with jid {0}'.format(jid)
            print(msg)
            print('-' * len(msg) + '\n')
        elif show_jid:
            print('jid: {0}'.format(jid))

        # lazy load the connected minions
        connected_minions = None
        return_count = 0

        for ret in self.get_iter_returns(jid,
                                         minions,
                                         timeout=timeout,
                                         tgt=tgt,
                                         tgt_type=tgt_type,
                                         # (gtmanfred) expect_minions is popped here incase it is passed from a client
                                         # call. If this is not popped, then it would be passed twice to
                                         # get_iter_returns.
                                         expect_minions=(kwargs.pop('expect_minions', False) or verbose or show_timeout),
                                         **kwargs
                                         ):
            log.debug('return event: %s', ret)
            return_count = return_count + 1
            if progress:
                for id_, min_ret in six.iteritems(ret):
                    if not min_ret.get('failed') is True:
                        yield {'minion_count': len(minions), 'return_count': return_count}
            # replace the return structure for missing minions
            for id_, min_ret in six.iteritems(ret):
                if min_ret.get('failed') is True:
                    if connected_minions is None:
                        connected_minions = salt.utils.minions.CkMinions(self.opts).connected_ids()
                    if self.opts['minion_data_cache'] \
                            and salt.cache.factory(self.opts).contains('minions/{0}'.format(id_), 'data') \
                            and connected_minions \
                            and id_ not in connected_minions:

                        yield {
                            id_: {
                                'out': 'no_return',
                                'ret': 'Minion did not return. [Not connected]',
                                'retcode': salt.defaults.exitcodes.EX_GENERIC
                            }
                        }
                    else:
                        # don't report syndics as unresponsive minions
                        if not os.path.exists(os.path.join(self.opts['syndic_dir'], id_)):
                            yield {
                                id_: {
                                    'out': 'no_return',
                                    'ret': 'Minion did not return. [No response]'
                                           '\nThe minions may not have all finished running and any '
                                           'remaining minions will return upon completion. To look '
                                           'up the return data for this job later, run the following '
                                           'command:\n\n'
                                           'salt-run jobs.lookup_jid {0}'.format(jid),
                                    'retcode': salt.defaults.exitcodes.EX_GENERIC
                                }
                            }
                else:
                    yield {id_: min_ret}

        self._clean_up_subscriptions(jid)

    def get_event_iter_returns(self, jid, minions, timeout=None):
        '''
        Gather the return data from the event system, break hard when
        timeout is reached.
''' log.trace('entered - function get_event_iter_returns()') if timeout is None: timeout = self.opts['timeout'] timeout_at = time.time() + timeout found = set() # Check to see if the jid is real, if not return the empty dict if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() # Wait for the hosts to check in while True: raw = self.event.get_event(timeout, auto_reconnect=self.auto_reconnect) if raw is None or time.time() > timeout_at: # Timeout reached break if 'minions' in raw.get('data', {}): continue try: found.add(raw['id']) ret = {raw['id']: {'ret': raw['return']}} except KeyError: # Ignore other erroneous messages continue if 'out' in raw: ret[raw['id']]['out'] = raw['out'] yield ret time.sleep(0.02) def _prep_pub(self, tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs): ''' Set up the payload_kwargs to be sent down to the master ''' if tgt_type == 'nodegroup': if tgt not in self.opts['nodegroups']: conf_file = self.opts.get( 'conf_file', 'the master config file' ) raise SaltInvocationError( 'Node group {0} unavailable in {1}'.format( tgt, conf_file ) ) tgt = salt.utils.minions.nodegroup_comp(tgt, self.opts['nodegroups']) tgt_type = 'compound' # Convert a range expression to a list of nodes and change expression # form to list if tgt_type == 'range' and HAS_RANGE: tgt = self._convert_range_to_list(tgt) tgt_type = 'list' # If an external job cache is specified add it to the ret list if self.opts.get('ext_job_cache'): if ret: ret += ',{0}'.format(self.opts['ext_job_cache']) else: ret = self.opts['ext_job_cache'] # format the payload - make a function that does this in the payload # module # Generate the standard keyword args to feed to format_payload payload_kwargs = {'cmd': 'publish', 'tgt': tgt, 'fun': fun, 'arg': arg, 'key': self.key, 'tgt_type': tgt_type, 'ret': ret, 'jid': jid} # if kwargs are passed, pack them. 
if kwargs: payload_kwargs['kwargs'] = kwargs # If we have a salt user, add it to the payload if self.opts['syndic_master'] and 'user' in kwargs: payload_kwargs['user'] = kwargs['user'] elif self.salt_user: payload_kwargs['user'] = self.salt_user # If we're a syndication master, pass the timeout if self.opts['order_masters']: payload_kwargs['to'] = timeout return payload_kwargs def pub(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, listen=False, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.ReqChannel.factory(self.opts, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. 
# If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError as err: log.error(err) raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: return payload self.key = key payload_kwargs['key'] = self.key payload = channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: return payload # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() return {'jid': payload['load']['jid'], 'minions': payload['load']['minions']} @tornado.gen.coroutine def pub_async(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, io_loop=None, listen=True, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. 
fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.AsyncReqChannel.factory(self.opts, io_loop=io_loop, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. # If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = yield channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError: raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! 
Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: raise tornado.gen.Return(payload) self.key = key payload_kwargs['key'] = self.key payload = yield channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: raise tornado.gen.Return(payload) # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() raise tornado.gen.Return({'jid': payload['load']['jid'], 'minions': payload['load']['minions']}) def __del__(self): # This IS really necessary! # When running tests, if self.events is not destroyed, we leak 2 # threads per test case which uses self.client if hasattr(self, 'event'): # The call below will take care of calling 'self.event.destroy()' del self.event def _clean_up_subscriptions(self, job_id): if self.opts.get('order_masters'): self.event.unsubscribe('syndic/.*/{0}'.format(job_id), 'regex') self.event.unsubscribe('salt/job/{0}'.format(job_id))
saltstack/salt
salt/client/__init__.py
LocalClient.cmd_iter_no_block
python
def cmd_iter_no_block( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, show_jid=False, verbose=False, **kwargs): ''' Yields the individual minion returns as they come in, or None when no returns are available. The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A generator yielding the individual minion returns, or None when no returns are available. This allows for actions to be injected in between minion returns. .. code-block:: python >>> ret = local.cmd_iter_no_block('*', 'test.ping') >>> for i in ret: ... print(i) None {'jerry': {'ret': True}} {'dave': {'ret': True}} None {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=timeout, tgt=tgt, tgt_type=tgt_type, block=False, **kwargs): if fn_ret and any([show_jid, verbose]): for minion in fn_ret: fn_ret[minion]['jid'] = pub_data['jid'] yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub()
Yields the individual minion returns as they come in, or None when no returns are available. The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A generator yielding the individual minion returns, or None when no returns are available. This allows for actions to be injected in between minion returns. .. code-block:: python >>> ret = local.cmd_iter_no_block('*', 'test.ping') >>> for i in ret: ... print(i) None {'jerry': {'ret': True}} {'dave': {'ret': True}} None {'stewart': {'ret': True}}
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L886-L952
[ "def run_job(\n self,\n tgt,\n fun,\n arg=(),\n tgt_type='glob',\n ret='',\n timeout=None,\n jid='',\n kwarg=None,\n listen=False,\n **kwargs):\n '''\n Asynchronously send a command to connected minions\n\n Prep the job directory and publish a command to any targeted minions.\n\n :return: A dictionary of (validated) ``pub_data`` or an empty\n dictionary on failure. The ``pub_data`` contains the job ID and a\n list of all minions that are expected to return data.\n\n .. code-block:: python\n\n >>> local.run_job('*', 'test.sleep', [300])\n {'jid': '20131219215650131543', 'minions': ['jerry']}\n '''\n arg = salt.utils.args.parse_input(arg, kwargs=kwarg)\n\n try:\n pub_data = self.pub(\n tgt,\n fun,\n arg,\n tgt_type,\n ret,\n jid=jid,\n timeout=self._get_timeout(timeout),\n listen=listen,\n **kwargs)\n except SaltClientError:\n # Re-raise error with specific message\n raise SaltClientError(\n 'The salt master could not be contacted. Is master running?'\n )\n except AuthenticationError as err:\n raise AuthenticationError(err)\n except AuthorizationError as err:\n raise AuthorizationError(err)\n except Exception as general_exception:\n # Convert to generic client error and pass along message\n raise SaltClientError(general_exception)\n\n return self._check_pub_data(pub_data, listen=listen)\n", "def get_iter_returns(\n self,\n jid,\n minions,\n timeout=None,\n tgt='*',\n tgt_type='glob',\n expect_minions=False,\n block=True,\n **kwargs):\n '''\n Watch the event system and return job data as it comes in\n\n :returns: all of the information for the JID\n '''\n if not isinstance(minions, set):\n if isinstance(minions, six.string_types):\n minions = set([minions])\n elif isinstance(minions, (list, tuple)):\n minions = set(list(minions))\n\n if timeout is None:\n timeout = self.opts['timeout']\n gather_job_timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout']))\n start = int(time.time())\n\n # timeouts per minion, id_ -> timeout time\n 
minion_timeouts = {}\n\n found = set()\n missing = set()\n # Check to see if the jid is real, if not return the empty dict\n try:\n if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}:\n log.warning('jid does not exist')\n yield {}\n # stop the iteration, since the jid is invalid\n raise StopIteration()\n except Exception as exc:\n log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG)\n # Wait for the hosts to check in\n last_time = False\n # iterator for this job's return\n if self.opts['order_masters']:\n # If we are a MoM, we need to gather expected minions from downstreams masters.\n ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex')\n else:\n ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid))\n # iterator for the info of this job\n jinfo_iter = []\n # open event jids that need to be un-subscribed from later\n open_jids = set()\n timeout_at = time.time() + timeout\n gather_syndic_wait = time.time() + self.opts['syndic_wait']\n # are there still minions running the job out there\n # start as True so that we ping at least once\n minions_running = True\n log.debug(\n 'get_iter_returns for jid %s sent to %s will timeout at %s',\n jid, minions, datetime.fromtimestamp(timeout_at).time()\n )\n while True:\n # Process events until timeout is reached or all minions have returned\n for raw in ret_iter:\n # if we got None, then there were no events\n if raw is None:\n break\n if 'minions' in raw.get('data', {}):\n minions.update(raw['data']['minions'])\n if 'missing' in raw.get('data', {}):\n missing.update(raw['data']['missing'])\n continue\n if 'return' not in raw['data']:\n continue\n if kwargs.get('raw', False):\n found.add(raw['data']['id'])\n yield raw\n else:\n found.add(raw['data']['id'])\n ret = {raw['data']['id']: {'ret': raw['data']['return']}}\n if 'out' in raw['data']:\n ret[raw['data']['id']]['out'] = raw['data']['out']\n if 'retcode' in raw['data']:\n 
ret[raw['data']['id']]['retcode'] = raw['data']['retcode']\n if 'jid' in raw['data']:\n ret[raw['data']['id']]['jid'] = raw['data']['jid']\n if kwargs.get('_cmd_meta', False):\n ret[raw['data']['id']].update(raw['data'])\n log.debug('jid %s return from %s', jid, raw['data']['id'])\n yield ret\n\n # if we have all of the returns (and we aren't a syndic), no need for anything fancy\n if len(found.intersection(minions)) >= len(minions) and not self.opts['order_masters']:\n # All minions have returned, break out of the loop\n log.debug('jid %s found all minions %s', jid, found)\n break\n elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']:\n if len(found) >= len(minions) and len(minions) > 0 and time.time() > gather_syndic_wait:\n # There were some minions to find and we found them\n # However, this does not imply that *all* masters have yet responded with expected minion lists.\n # Therefore, continue to wait up to the syndic_wait period (calculated in gather_syndic_wait) to see\n # if additional lower-level masters deliver their lists of expected\n # minions.\n break\n # If we get here we may not have gathered the minion list yet. 
Keep waiting\n # for all lower-level masters to respond with their minion lists\n\n # let start the timeouts for all remaining minions\n\n for id_ in minions - found:\n # if we have a new minion in the list, make sure it has a timeout\n if id_ not in minion_timeouts:\n minion_timeouts[id_] = time.time() + timeout\n\n # if the jinfo has timed out and some minions are still running the job\n # re-do the ping\n if time.time() > timeout_at and minions_running:\n # since this is a new ping, no one has responded yet\n jinfo = self.gather_job_info(jid, list(minions - found), 'list', **kwargs)\n minions_running = False\n # if we weren't assigned any jid that means the master thinks\n # we have nothing to send\n if 'jid' not in jinfo:\n jinfo_iter = []\n else:\n jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid']))\n timeout_at = time.time() + gather_job_timeout\n # if you are a syndic, wait a little longer\n if self.opts['order_masters']:\n timeout_at += self.opts.get('syndic_wait', 1)\n\n # check for minions that are running the job still\n for raw in jinfo_iter:\n # if there are no more events, lets stop waiting for the jinfo\n if raw is None:\n break\n try:\n if raw['data']['retcode'] > 0:\n log.error('saltutil returning errors on minion %s', raw['data']['id'])\n minions.remove(raw['data']['id'])\n break\n except KeyError as exc:\n # This is a safe pass. We're just using the try/except to\n # avoid having to deep-check for keys.\n missing_key = exc.__str__().strip('\\'\"')\n if missing_key == 'retcode':\n log.debug('retcode missing from client return')\n else:\n log.debug(\n 'Passing on saltutil error. Key \\'%s\\' missing '\n 'from client return. 
This may be an error in '\n 'the client.', missing_key\n )\n # Keep track of the jid events to unsubscribe from later\n open_jids.add(jinfo['jid'])\n\n # TODO: move to a library??\n if 'minions' in raw.get('data', {}):\n minions.update(raw['data']['minions'])\n continue\n if 'syndic' in raw.get('data', {}):\n minions.update(raw['syndic'])\n continue\n if 'return' not in raw.get('data', {}):\n continue\n\n # if the job isn't running there anymore... don't count\n if raw['data']['return'] == {}:\n continue\n\n # if the minion throws an exception containing the word \"return\"\n # the master will try to handle the string as a dict in the next\n # step. Check if we have a string, log the issue and continue.\n if isinstance(raw['data']['return'], six.string_types):\n log.error(\"unexpected return from minion: %s\", raw)\n continue\n\n if 'return' in raw['data']['return'] and \\\n raw['data']['return']['return'] == {}:\n continue\n\n # if we didn't originally target the minion, lets add it to the list\n if raw['data']['id'] not in minions:\n minions.add(raw['data']['id'])\n # update this minion's timeout, as long as the job is still running\n minion_timeouts[raw['data']['id']] = time.time() + timeout\n # a minion returned, so we know its running somewhere\n minions_running = True\n\n # if we have hit gather_job_timeout (after firing the job) AND\n # if we have hit all minion timeouts, lets call it\n now = time.time()\n # if we have finished waiting, and no minions are running the job\n # then we need to see if each minion has timedout\n done = (now > timeout_at) and not minions_running\n if done:\n # if all minions have timeod out\n for id_ in minions - found:\n if now < minion_timeouts[id_]:\n done = False\n break\n if done:\n break\n\n # don't spin\n if block:\n time.sleep(0.01)\n else:\n yield\n\n # If there are any remaining open events, clean them up.\n if open_jids:\n for jid in open_jids:\n self.event.unsubscribe(jid)\n\n if expect_minions:\n for minion in 
list((minions - found)):\n yield {minion: {'failed': True}}\n\n # Filter out any minions marked as missing for which we received\n # returns (prevents false events sent due to higher-level masters not\n # knowing about lower-level minions).\n missing -= found\n\n # Report on missing minions\n if missing:\n for minion in missing:\n yield {minion: {'failed': True}}\n", "def _clean_up_subscriptions(self, job_id):\n if self.opts.get('order_masters'):\n self.event.unsubscribe('syndic/.*/{0}'.format(job_id), 'regex')\n self.event.unsubscribe('salt/job/{0}'.format(job_id))\n" ]
class LocalClient(object): ''' The interface used by the :command:`salt` CLI tool on the Salt Master ``LocalClient`` is used to send a command to Salt minions to execute :ref:`execution modules <all-salt.modules>` and return the results to the Salt Master. Importing and using ``LocalClient`` must be done on the same machine as the Salt Master and it must be done using the same user that the Salt Master is running as. (Unless :conf_master:`external_auth` is configured and authentication credentials are included in the execution). .. note:: The LocalClient uses a Tornado IOLoop, this can create issues when using the LocalClient inside an existing IOLoop. If creating the LocalClient in partnership with another IOLoop either create the IOLoop before creating the LocalClient, or when creating the IOLoop use ioloop.current() which will return the ioloop created by LocalClient. .. code-block:: python import salt.client local = salt.client.LocalClient() local.cmd('*', 'test.fib', [10]) ''' def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, 'master'), mopts=None, skip_perm_errors=False, io_loop=None, keep_loop=False, auto_reconnect=False): ''' :param IOLoop io_loop: io_loop used for events. Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. 
''' if mopts: self.opts = mopts else: if os.path.isdir(c_path): log.warning( '%s expects a file path not a directory path(%s) to ' 'its \'c_path\' keyword argument', self.__class__.__name__, c_path ) self.opts = salt.config.client_config(c_path) self.serial = salt.payload.Serial(self.opts) self.salt_user = salt.utils.user.get_specific_user() self.skip_perm_errors = skip_perm_errors self.key = self.__read_master_key() self.auto_reconnect = auto_reconnect self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False, io_loop=io_loop, keep_loop=keep_loop) self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils) self.returners = salt.loader.returners(self.opts, self.functions) def __read_master_key(self): ''' Read in the rotating master authentication key ''' key_user = self.salt_user if key_user == 'root': if self.opts.get('user', 'root') != 'root': key_user = self.opts.get('user', 'root') if key_user.startswith('sudo_'): key_user = self.opts.get('user', 'root') if salt.utils.platform.is_windows(): # The username may contain '\' if it is in Windows # 'DOMAIN\username' format. Fix this for the keyfile path. 
key_user = key_user.replace('\\', '_') keyfile = os.path.join(self.opts['cachedir'], '.{0}_key'.format(key_user)) try: # Make sure all key parent directories are accessible salt.utils.verify.check_path_traversal(self.opts['cachedir'], key_user, self.skip_perm_errors) with salt.utils.files.fopen(keyfile, 'r') as key: return salt.utils.stringutils.to_unicode(key.read()) except (OSError, IOError, SaltClientError): # Fall back to eauth return '' def _convert_range_to_list(self, tgt): ''' convert a seco.range range into a list target ''' range_ = seco.range.Range(self.opts['range_server']) try: return range_.expand(tgt) except seco.range.RangeException as err: print('Range server exception: {0}'.format(err)) return [] def _get_timeout(self, timeout): ''' Return the timeout to use ''' if timeout is None: return self.opts['timeout'] if isinstance(timeout, int): return timeout if isinstance(timeout, six.string_types): try: return int(timeout) except ValueError: return self.opts['timeout'] # Looks like the timeout is invalid, use config return self.opts['timeout'] def gather_job_info(self, jid, tgt, tgt_type, listen=True, **kwargs): ''' Return the information about a given job ''' log.debug('Checking whether jid %s is still running', jid) timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) pub_data = self.run_job(tgt, 'saltutil.find_job', arg=[jid], tgt_type=tgt_type, timeout=timeout, listen=listen, **kwargs ) if 'jid' in pub_data: self.event.subscribe(pub_data['jid']) return pub_data def _check_pub_data(self, pub_data, listen=True): ''' Common checks on the pub_data data structure returned from running pub ''' if pub_data == '': # Failed to authenticate, this could be a bunch of things raise EauthAuthenticationError( 'Failed to authenticate! This is most likely because this ' 'user is not permitted to execute commands, but there is a ' 'small possibility that a disk error occurred (check ' 'disk/inode usage).' 
) # Failed to connect to the master and send the pub if 'error' in pub_data: print(pub_data['error']) log.debug('_check_pub_data() error: %s', pub_data['error']) return {} elif 'jid' not in pub_data: return {} if pub_data['jid'] == '0': print('Failed to connect to the Master, ' 'is the Salt Master running?') return {} # If we order masters (via a syndic), don't short circuit if no minions # are found if not self.opts.get('order_masters'): # Check for no minions if not pub_data['minions']: print('No minions matched the target. ' 'No command was sent, no jid was assigned.') return {} # don't install event subscription listeners when the request is asynchronous # and doesn't care. this is important as it will create event leaks otherwise if not listen: return pub_data if self.opts.get('order_masters'): self.event.subscribe('syndic/.*/{0}'.format(pub_data['jid']), 'regex') self.event.subscribe('salt/job/{0}'.format(pub_data['jid'])) return pub_data def run_job( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=False, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = self.pub( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) return self._check_pub_data(pub_data, listen=listen) def gather_minions(self, tgt, expr_form): _res = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form) return _res['minions'] @tornado.gen.coroutine def run_job_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=True, io_loop=None, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job_async('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = yield self.pub_async( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), io_loop=io_loop, listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) raise tornado.gen.Return(self._check_pub_data(pub_data, listen=listen)) def cmd_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', kwarg=None, **kwargs): ''' Asynchronously send a command to connected minions The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A job ID or 0 on failure. .. code-block:: python >>> local.cmd_async('*', 'test.sleep', [300]) '20131219215921857715' ''' pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, jid=jid, kwarg=kwarg, listen=False, **kwargs) try: return pub_data['jid'] except KeyError: return 0 def cmd_subset( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, sub=3, cli=False, progress=False, full_return=False, **kwargs): ''' Execute a command on a random subset of the targeted systems The function signature is the same as :py:meth:`cmd` with the following exceptions. :param sub: The number of systems to execute on :param cli: When this is set to True, a generator is returned, otherwise a dictionary of the minion returns is returned .. 
code-block:: python >>> SLC.cmd_subset('*', 'test.ping', sub=1) {'jerry': True} ''' minion_ret = self.cmd(tgt, 'sys.list_functions', tgt_type=tgt_type, **kwargs) minions = list(minion_ret) random.shuffle(minions) f_tgt = [] for minion in minions: if fun in minion_ret[minion]: f_tgt.append(minion) if len(f_tgt) >= sub: break func = self.cmd if cli: func = self.cmd_cli return func( f_tgt, fun, arg, tgt_type='list', ret=ret, kwarg=kwarg, progress=progress, full_return=full_return, **kwargs) def cmd_batch( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, batch='10%', **kwargs): ''' Iteratively execute a command on subsets of minions at a time The function signature is the same as :py:meth:`cmd` with the following exceptions. :param batch: The batch identifier of systems to execute on :returns: A generator of minion returns .. code-block:: python >>> returns = local.cmd_batch('*', 'state.highstate', batch='10%') >>> for ret in returns: ... print(ret) {'jerry': {...}} {'dave': {...}} {'stewart': {...}} ''' # Late import - not used anywhere else in this file import salt.cli.batch opts = salt.cli.batch.batch_get_opts( tgt, fun, batch, self.opts, arg=arg, tgt_type=tgt_type, ret=ret, kwarg=kwarg, **kwargs) eauth = salt.cli.batch.batch_get_eauth(kwargs) arg = salt.utils.args.parse_input(arg, kwargs=kwarg) opts = {'tgt': tgt, 'fun': fun, 'arg': arg, 'tgt_type': tgt_type, 'ret': ret, 'batch': batch, 'failhard': kwargs.get('failhard', False), 'raw': kwargs.get('raw', False)} if 'timeout' in kwargs: opts['timeout'] = kwargs['timeout'] if 'gather_job_timeout' in kwargs: opts['gather_job_timeout'] = kwargs['gather_job_timeout'] if 'batch_wait' in kwargs: opts['batch_wait'] = int(kwargs['batch_wait']) eauth = {} if 'eauth' in kwargs: eauth['eauth'] = kwargs.pop('eauth') if 'username' in kwargs: eauth['username'] = kwargs.pop('username') if 'password' in kwargs: eauth['password'] = kwargs.pop('password') if 'token' in kwargs: eauth['token'] = kwargs.pop('token') for key, 
val in six.iteritems(self.opts): if key not in opts: opts[key] = val batch = salt.cli.batch.Batch(opts, eauth=eauth, quiet=True) for ret in batch.run(): yield ret def cmd(self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', jid='', full_return=False, kwarg=None, **kwargs): ''' Synchronously execute a command on targeted minions The cmd method will execute and wait for the timeout period for all minions to reply, then it will return all minion data at once. .. code-block:: python >>> import salt.client >>> local = salt.client.LocalClient() >>> local.cmd('*', 'cmd.run', ['whoami']) {'jerry': 'root'} With extra keyword arguments for the command function to be run: .. code-block:: python local.cmd('*', 'test.arg', ['arg1', 'arg2'], kwarg={'foo': 'bar'}) Compound commands can be used for multiple executions in a single publish. Function names and function arguments are provided in separate lists but the index values must correlate and an empty list must be used if no arguments are required. .. code-block:: python >>> local.cmd('*', [ 'grains.items', 'sys.doc', 'cmd.run', ], [ [], [], ['uptime'], ]) :param tgt: Which minions to target for the execution. Default is shell glob. Modified by the ``tgt_type`` option. :type tgt: string or list :param fun: The module and function to call on the specified minions of the form ``module.function``. For example ``test.ping`` or ``grains.items``. Compound commands Multiple functions may be called in a single publish by passing a list of commands. This can dramatically lower overhead and speed up the application communicating with Salt. This requires that the ``arg`` param is a list of lists. The ``fun`` list and the ``arg`` list must correlate by index meaning a function that does not take arguments must still have a corresponding empty list at the expected index. :type fun: string or list of strings :param arg: A list of arguments to pass to the remote function. 
If the function takes no arguments ``arg`` may be omitted except when executing a compound command. :type arg: list or list-of-lists :param timeout: Seconds to wait after the last minion returns but before all minions return. :param tgt_type: The type of ``tgt``. Allowed values: * ``glob`` - Bash glob completion - Default * ``pcre`` - Perl style regular expression * ``list`` - Python list of hosts * ``grain`` - Match based on a grain comparison * ``grain_pcre`` - Grain comparison with a regex * ``pillar`` - Pillar data comparison * ``pillar_pcre`` - Pillar data comparison with a regex * ``nodegroup`` - Match on nodegroup * ``range`` - Use a Range server for matching * ``compound`` - Pass a compound match string * ``ipcidr`` - Match based on Subnet (CIDR notation) or IPv4 address. .. versionchanged:: 2017.7.0 Renamed from ``expr_form`` to ``tgt_type`` :param ret: The returner to use. The value passed can be single returner, or a comma delimited list of returners to call in order on the minions :param kwarg: A dictionary with keyword arguments for the function. :param full_return: Output the job return only (default) or the full return including exit code and other job metadata. :param kwargs: Optional keyword arguments. Authentication credentials may be passed when using :conf_master:`external_auth`. For example: ``local.cmd('*', 'test.ping', username='saltdev', password='saltdev', eauth='pam')``. Or: ``local.cmd('*', 'test.ping', token='5871821ea51754fdcea8153c1c745433')`` :returns: A dictionary with the result of the execution, keyed by minion ID. A compound command will return a sub-dictionary keyed by function name. 
''' was_listening = self.event.cpub try: pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, timeout, jid, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data ret = {} for fn_ret in self.get_cli_event_returns( pub_data['jid'], pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, **kwargs): if fn_ret: for mid, data in six.iteritems(fn_ret): ret[mid] = (data if full_return else data.get('ret', {})) for failed in list(set(pub_data['minions']) - set(ret)): ret[failed] = False return ret finally: if not was_listening: self.event.close_pub() def cmd_cli( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, progress=False, **kwargs): ''' Used by the :command:`salt` CLI. This method returns minion returns as they come back and attempts to block until all minions return. The function signature is the same as :py:meth:`cmd` with the following exceptions. :param verbose: Print extra information about the running command :returns: A generator ''' was_listening = self.event.cpub if fun.startswith('state.'): ref = {'compound': '-C', 'glob': '', 'grain': '-G', 'grain_pcre': '-P', 'ipcidr': '-S', 'list': '-L', 'nodegroup': '-N', 'pcre': '-E', 'pillar': '-I', 'pillar_pcre': '-J'} if HAS_RANGE: ref['range'] = '-R' if ref[tgt_type].startswith('-'): self.target_data = "{0} '{1}'".format( ref[tgt_type], ','.join(tgt) if isinstance(tgt, list) else tgt) else: self.target_data = ','.join(tgt) if isinstance(tgt, list) else tgt else: self.target_data = '' try: self.pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not self.pub_data: yield self.pub_data else: try: for fn_ret in self.get_cli_event_returns( self.pub_data['jid'], self.pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, verbose, progress, **kwargs): if not fn_ret: continue yield fn_ret except KeyboardInterrupt: exit_msg = ( '\nExiting gracefully on Ctrl-c' '\n' 'This job\'s jid is: {0}\n' 'The 
minions may not have all finished running and any ' 'remaining minions will return upon completion.\n\n' 'To look up the return data for this job later, run the ' 'following command:\n' 'salt-run jobs.lookup_jid {0}'.format(self.pub_data['jid'])) if self.target_data: exit_msg += ( '\n\n' 'To set up the state run to safely exit, run the following command:\n' 'salt {0} state.soft_kill {1}'.format(self.target_data, self.pub_data['jid'])) raise SystemExit(exit_msg) finally: if not was_listening: self.event.close_pub() def cmd_iter( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, **kwargs): ''' Yields the individual minion returns as they come in The function signature is the same as :py:meth:`cmd` with the following exceptions. Normally :py:meth:`cmd_iter` does not yield results for minions that are not connected. If you want it to return results for disconnected minions set `expect_minions=True` in `kwargs`. :return: A generator yielding the individual minion returns .. code-block:: python >>> ret = local.cmd_iter('*', 'test.ping') >>> for i in ret: ... 
print(i) {'jerry': {'ret': True}} {'dave': {'ret': True}} {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: if kwargs.get('yield_pub_data'): yield pub_data for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=self._get_timeout(timeout), tgt=tgt, tgt_type=tgt_type, **kwargs): if not fn_ret: continue yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_full_return( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, **kwargs): ''' Execute a salt command and return ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data return (self.get_cli_static_event_returns(pub_data['jid'], pub_data['minions'], timeout, tgt, tgt_type, verbose)) finally: if not was_listening: self.event.close_pub() def get_cli_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_jid=False, **kwargs): ''' Starts a watcher looking at the return data for a specified JID :returns: all of the information for the JID ''' if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] fret = {} # make sure the minions is a set (since we do set operations on it) minions = set(minions) found = set() # start this before the cache lookup-- in case new stuff comes in event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) # get the info from the cache ret = self.get_cache_returns(jid) if ret != {}: found.update(set(ret)) yield ret # if you have all the returns, stop if len(found.intersection(minions)) >= len(minions): raise 
StopIteration() # otherwise, get them from the event system for event in event_iter: if event != {}: found.update(set(event)) yield event if len(found.intersection(minions)) >= len(minions): self._clean_up_subscriptions(jid) raise StopIteration() # TODO: tests!! def get_returns_no_block( self, tag, match_type=None): ''' Raw function to just return events of jid excluding timeout logic Yield either the raw event data or None Pass a list of additional regular expressions as `tags_regex` to search the event bus for non-return data, such as minion lists returned from syndics. ''' while True: raw = self.event.get_event(wait=0.01, tag=tag, match_type=match_type, full=True, no_block=True, auto_reconnect=self.auto_reconnect) yield raw def get_iter_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', expect_minions=False, block=True, **kwargs): ''' Watch the event system and return job data as it comes in :returns: all of the information for the JID ''' if not isinstance(minions, set): if isinstance(minions, six.string_types): minions = set([minions]) elif isinstance(minions, (list, tuple)): minions = set(list(minions)) if timeout is None: timeout = self.opts['timeout'] gather_job_timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) start = int(time.time()) # timeouts per minion, id_ -> timeout time minion_timeouts = {} found = set() missing = set() # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() except Exception as exc: log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG) # Wait for the hosts to check in last_time = False # iterator for this job's return if self.opts['order_masters']: # If we are a MoM, we need to gather expected minions from downstreams masters. 
ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex') else: ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid)) # iterator for the info of this job jinfo_iter = [] # open event jids that need to be un-subscribed from later open_jids = set() timeout_at = time.time() + timeout gather_syndic_wait = time.time() + self.opts['syndic_wait'] # are there still minions running the job out there # start as True so that we ping at least once minions_running = True log.debug( 'get_iter_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) while True: # Process events until timeout is reached or all minions have returned for raw in ret_iter: # if we got None, then there were no events if raw is None: break if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) if 'missing' in raw.get('data', {}): missing.update(raw['data']['missing']) continue if 'return' not in raw['data']: continue if kwargs.get('raw', False): found.add(raw['data']['id']) yield raw else: found.add(raw['data']['id']) ret = {raw['data']['id']: {'ret': raw['data']['return']}} if 'out' in raw['data']: ret[raw['data']['id']]['out'] = raw['data']['out'] if 'retcode' in raw['data']: ret[raw['data']['id']]['retcode'] = raw['data']['retcode'] if 'jid' in raw['data']: ret[raw['data']['id']]['jid'] = raw['data']['jid'] if kwargs.get('_cmd_meta', False): ret[raw['data']['id']].update(raw['data']) log.debug('jid %s return from %s', jid, raw['data']['id']) yield ret # if we have all of the returns (and we aren't a syndic), no need for anything fancy if len(found.intersection(minions)) >= len(minions) and not self.opts['order_masters']: # All minions have returned, break out of the loop log.debug('jid %s found all minions %s', jid, found) break elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']: if len(found) >= len(minions) and len(minions) > 0 and time.time() > 
gather_syndic_wait: # There were some minions to find and we found them # However, this does not imply that *all* masters have yet responded with expected minion lists. # Therefore, continue to wait up to the syndic_wait period (calculated in gather_syndic_wait) to see # if additional lower-level masters deliver their lists of expected # minions. break # If we get here we may not have gathered the minion list yet. Keep waiting # for all lower-level masters to respond with their minion lists # let start the timeouts for all remaining minions for id_ in minions - found: # if we have a new minion in the list, make sure it has a timeout if id_ not in minion_timeouts: minion_timeouts[id_] = time.time() + timeout # if the jinfo has timed out and some minions are still running the job # re-do the ping if time.time() > timeout_at and minions_running: # since this is a new ping, no one has responded yet jinfo = self.gather_job_info(jid, list(minions - found), 'list', **kwargs) minions_running = False # if we weren't assigned any jid that means the master thinks # we have nothing to send if 'jid' not in jinfo: jinfo_iter = [] else: jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid'])) timeout_at = time.time() + gather_job_timeout # if you are a syndic, wait a little longer if self.opts['order_masters']: timeout_at += self.opts.get('syndic_wait', 1) # check for minions that are running the job still for raw in jinfo_iter: # if there are no more events, lets stop waiting for the jinfo if raw is None: break try: if raw['data']['retcode'] > 0: log.error('saltutil returning errors on minion %s', raw['data']['id']) minions.remove(raw['data']['id']) break except KeyError as exc: # This is a safe pass. We're just using the try/except to # avoid having to deep-check for keys. missing_key = exc.__str__().strip('\'"') if missing_key == 'retcode': log.debug('retcode missing from client return') else: log.debug( 'Passing on saltutil error. 
Key \'%s\' missing ' 'from client return. This may be an error in ' 'the client.', missing_key ) # Keep track of the jid events to unsubscribe from later open_jids.add(jinfo['jid']) # TODO: move to a library?? if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue if 'syndic' in raw.get('data', {}): minions.update(raw['syndic']) continue if 'return' not in raw.get('data', {}): continue # if the job isn't running there anymore... don't count if raw['data']['return'] == {}: continue # if the minion throws an exception containing the word "return" # the master will try to handle the string as a dict in the next # step. Check if we have a string, log the issue and continue. if isinstance(raw['data']['return'], six.string_types): log.error("unexpected return from minion: %s", raw) continue if 'return' in raw['data']['return'] and \ raw['data']['return']['return'] == {}: continue # if we didn't originally target the minion, lets add it to the list if raw['data']['id'] not in minions: minions.add(raw['data']['id']) # update this minion's timeout, as long as the job is still running minion_timeouts[raw['data']['id']] = time.time() + timeout # a minion returned, so we know its running somewhere minions_running = True # if we have hit gather_job_timeout (after firing the job) AND # if we have hit all minion timeouts, lets call it now = time.time() # if we have finished waiting, and no minions are running the job # then we need to see if each minion has timedout done = (now > timeout_at) and not minions_running if done: # if all minions have timeod out for id_ in minions - found: if now < minion_timeouts[id_]: done = False break if done: break # don't spin if block: time.sleep(0.01) else: yield # If there are any remaining open events, clean them up. 
if open_jids: for jid in open_jids: self.event.unsubscribe(jid) if expect_minions: for minion in list((minions - found)): yield {minion: {'failed': True}} # Filter out any minions marked as missing for which we received # returns (prevents false events sent due to higher-level masters not # knowing about lower-level minions). missing -= found # Report on missing minions if missing: for minion in missing: yield {minion: {'failed': True}} def get_returns( self, jid, minions, timeout=None): ''' Get the returns for the command line interface via the event system ''' minions = set(minions) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout log.debug( 'get_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Master job cache returner [{0}] failed to verify jid. 
' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: time_left = timeout_at - int(time.time()) wait = max(1, time_left) raw = self.event.get_event(wait, jid, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: found.add(raw['id']) ret[raw['id']] = raw['return'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break if int(time.time()) > timeout_at: log.info( 'jid %s minions %s did not return in time', jid, (minions - found) ) break time.sleep(0.01) return ret def get_full_returns(self, jid, minions, timeout=None): ''' This method starts off a watcher looking at the return data for a specified jid, it returns all of the information for the jid ''' # TODO: change this from ret to return... or the other way. # Its inconsistent, we should pick one ret = {} # create the iterator-- since we want to get anyone in the middle event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Returner {0} could not fetch jid data. 
' 'Exception details: {1}'.format( self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # if we have all the minion returns, lets just return if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise lets use the listener we created above to get the rest for event_ret in event_iter: # if nothing in the event_ret, skip if event_ret == {}: time.sleep(0.02) continue for minion, m_data in six.iteritems(event_ret): if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # are we done yet? if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise we hit the timeout, return what we have return ret def get_cache_returns(self, jid): ''' Execute a single pass to gather the contents of the job cache ''' ret = {} try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Could not examine master job cache. ' 'Error occurred in {0} returner. 
' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data return ret def get_cli_static_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_timeout=False, show_jid=False): ''' Get the returns for the command line interface via the event system ''' log.trace('entered - function get_cli_static_event_returns()') minions = set(minions) if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Load could not be retrieved from ' 'returner {0}. 
Exception details: {1}'.format( self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: # Process events until timeout is reached or all minions have returned time_left = timeout_at - int(time.time()) # Wait 0 == forever, use a minimum of 1s wait = max(1, time_left) jid_tag = 'salt/job/{0}'.format(jid) raw = self.event.get_event(wait, jid_tag, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue found.add(raw['id']) ret[raw['id']] = {'ret': raw['return']} ret[raw['id']]['success'] = raw.get('success', False) if 'out' in raw: ret[raw['id']]['out'] = raw['out'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break if int(time.time()) > timeout_at: if verbose or show_timeout: if self.opts.get('minion_data_cache', False) \ or tgt_type in ('glob', 'pcre', 'list'): if len(found) < len(minions): fail = sorted(list(minions.difference(found))) for minion in fail: ret[minion] = { 'out': 'no_return', 'ret': 'Minion did not return' } break time.sleep(0.01) self._clean_up_subscriptions(jid) return ret def get_cli_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, progress=False, show_timeout=False, show_jid=False, **kwargs): ''' Get the returns for the command line interface via the event system ''' log.trace('func get_cli_event_returns()') if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) # lazy load the connected minions connected_minions = None return_count = 0 for ret in self.get_iter_returns(jid, minions, timeout=timeout, tgt=tgt, tgt_type=tgt_type, # (gtmanfred) 
expect_minions is popped here incase it is passed from a client # call. If this is not popped, then it would be passed twice to # get_iter_returns. expect_minions=(kwargs.pop('expect_minions', False) or verbose or show_timeout), **kwargs ): log.debug('return event: %s', ret) return_count = return_count + 1 if progress: for id_, min_ret in six.iteritems(ret): if not min_ret.get('failed') is True: yield {'minion_count': len(minions), 'return_count': return_count} # replace the return structure for missing minions for id_, min_ret in six.iteritems(ret): if min_ret.get('failed') is True: if connected_minions is None: connected_minions = salt.utils.minions.CkMinions(self.opts).connected_ids() if self.opts['minion_data_cache'] \ and salt.cache.factory(self.opts).contains('minions/{0}'.format(id_), 'data') \ and connected_minions \ and id_ not in connected_minions: yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [Not connected]', 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: # don't report syndics as unresponsive minions if not os.path.exists(os.path.join(self.opts['syndic_dir'], id_)): yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [No response]' '\nThe minions may not have all finished running and any ' 'remaining minions will return upon completion. To look ' 'up the return data for this job later, run the following ' 'command:\n\n' 'salt-run jobs.lookup_jid {0}'.format(jid), 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: yield {id_: min_ret} self._clean_up_subscriptions(jid) def get_event_iter_returns(self, jid, minions, timeout=None): ''' Gather the return data from the event system, break hard when timeout is reached. 
''' log.trace('entered - function get_event_iter_returns()') if timeout is None: timeout = self.opts['timeout'] timeout_at = time.time() + timeout found = set() # Check to see if the jid is real, if not return the empty dict if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() # Wait for the hosts to check in while True: raw = self.event.get_event(timeout, auto_reconnect=self.auto_reconnect) if raw is None or time.time() > timeout_at: # Timeout reached break if 'minions' in raw.get('data', {}): continue try: found.add(raw['id']) ret = {raw['id']: {'ret': raw['return']}} except KeyError: # Ignore other erroneous messages continue if 'out' in raw: ret[raw['id']]['out'] = raw['out'] yield ret time.sleep(0.02) def _prep_pub(self, tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs): ''' Set up the payload_kwargs to be sent down to the master ''' if tgt_type == 'nodegroup': if tgt not in self.opts['nodegroups']: conf_file = self.opts.get( 'conf_file', 'the master config file' ) raise SaltInvocationError( 'Node group {0} unavailable in {1}'.format( tgt, conf_file ) ) tgt = salt.utils.minions.nodegroup_comp(tgt, self.opts['nodegroups']) tgt_type = 'compound' # Convert a range expression to a list of nodes and change expression # form to list if tgt_type == 'range' and HAS_RANGE: tgt = self._convert_range_to_list(tgt) tgt_type = 'list' # If an external job cache is specified add it to the ret list if self.opts.get('ext_job_cache'): if ret: ret += ',{0}'.format(self.opts['ext_job_cache']) else: ret = self.opts['ext_job_cache'] # format the payload - make a function that does this in the payload # module # Generate the standard keyword args to feed to format_payload payload_kwargs = {'cmd': 'publish', 'tgt': tgt, 'fun': fun, 'arg': arg, 'key': self.key, 'tgt_type': tgt_type, 'ret': ret, 'jid': jid} # if kwargs are passed, pack them. 
if kwargs: payload_kwargs['kwargs'] = kwargs # If we have a salt user, add it to the payload if self.opts['syndic_master'] and 'user' in kwargs: payload_kwargs['user'] = kwargs['user'] elif self.salt_user: payload_kwargs['user'] = self.salt_user # If we're a syndication master, pass the timeout if self.opts['order_masters']: payload_kwargs['to'] = timeout return payload_kwargs def pub(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, listen=False, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.ReqChannel.factory(self.opts, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. 
# If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError as err: log.error(err) raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: return payload self.key = key payload_kwargs['key'] = self.key payload = channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: return payload # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() return {'jid': payload['load']['jid'], 'minions': payload['load']['minions']} @tornado.gen.coroutine def pub_async(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, io_loop=None, listen=True, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. 
fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.AsyncReqChannel.factory(self.opts, io_loop=io_loop, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. # If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = yield channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError: raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! 
Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: raise tornado.gen.Return(payload) self.key = key payload_kwargs['key'] = self.key payload = yield channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: raise tornado.gen.Return(payload) # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() raise tornado.gen.Return({'jid': payload['load']['jid'], 'minions': payload['load']['minions']}) def __del__(self): # This IS really necessary! # When running tests, if self.events is not destroyed, we leak 2 # threads per test case which uses self.client if hasattr(self, 'event'): # The call below will take care of calling 'self.event.destroy()' del self.event def _clean_up_subscriptions(self, job_id): if self.opts.get('order_masters'): self.event.unsubscribe('syndic/.*/{0}'.format(job_id), 'regex') self.event.unsubscribe('salt/job/{0}'.format(job_id))
saltstack/salt
salt/client/__init__.py
LocalClient.cmd_full_return
python
def cmd_full_return( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, **kwargs): ''' Execute a salt command and return ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data return (self.get_cli_static_event_returns(pub_data['jid'], pub_data['minions'], timeout, tgt, tgt_type, verbose)) finally: if not was_listening: self.event.close_pub()
Execute a salt command and return
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L954-L993
[ "def run_job(\n self,\n tgt,\n fun,\n arg=(),\n tgt_type='glob',\n ret='',\n timeout=None,\n jid='',\n kwarg=None,\n listen=False,\n **kwargs):\n '''\n Asynchronously send a command to connected minions\n\n Prep the job directory and publish a command to any targeted minions.\n\n :return: A dictionary of (validated) ``pub_data`` or an empty\n dictionary on failure. The ``pub_data`` contains the job ID and a\n list of all minions that are expected to return data.\n\n .. code-block:: python\n\n >>> local.run_job('*', 'test.sleep', [300])\n {'jid': '20131219215650131543', 'minions': ['jerry']}\n '''\n arg = salt.utils.args.parse_input(arg, kwargs=kwarg)\n\n try:\n pub_data = self.pub(\n tgt,\n fun,\n arg,\n tgt_type,\n ret,\n jid=jid,\n timeout=self._get_timeout(timeout),\n listen=listen,\n **kwargs)\n except SaltClientError:\n # Re-raise error with specific message\n raise SaltClientError(\n 'The salt master could not be contacted. Is master running?'\n )\n except AuthenticationError as err:\n raise AuthenticationError(err)\n except AuthorizationError as err:\n raise AuthorizationError(err)\n except Exception as general_exception:\n # Convert to generic client error and pass along message\n raise SaltClientError(general_exception)\n\n return self._check_pub_data(pub_data, listen=listen)\n", "def get_cli_static_event_returns(\n self,\n jid,\n minions,\n timeout=None,\n tgt='*',\n tgt_type='glob',\n verbose=False,\n show_timeout=False,\n show_jid=False):\n '''\n Get the returns for the command line interface via the event system\n '''\n log.trace('entered - function get_cli_static_event_returns()')\n minions = set(minions)\n if verbose:\n msg = 'Executing job with jid {0}'.format(jid)\n print(msg)\n print('-' * len(msg) + '\\n')\n elif show_jid:\n print('jid: {0}'.format(jid))\n\n if timeout is None:\n timeout = self.opts['timeout']\n\n start = int(time.time())\n timeout_at = start + timeout\n found = set()\n ret = {}\n # Check to see if the jid is real, if not 
return the empty dict\n try:\n if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}:\n log.warning('jid does not exist')\n return ret\n except Exception as exc:\n raise SaltClientError('Load could not be retrieved from '\n 'returner {0}. Exception details: {1}'.format(\n self.opts['master_job_cache'],\n exc))\n # Wait for the hosts to check in\n while True:\n # Process events until timeout is reached or all minions have returned\n time_left = timeout_at - int(time.time())\n # Wait 0 == forever, use a minimum of 1s\n wait = max(1, time_left)\n jid_tag = 'salt/job/{0}'.format(jid)\n raw = self.event.get_event(wait, jid_tag, auto_reconnect=self.auto_reconnect)\n if raw is not None and 'return' in raw:\n if 'minions' in raw.get('data', {}):\n minions.update(raw['data']['minions'])\n continue\n found.add(raw['id'])\n ret[raw['id']] = {'ret': raw['return']}\n ret[raw['id']]['success'] = raw.get('success', False)\n if 'out' in raw:\n ret[raw['id']]['out'] = raw['out']\n if len(found.intersection(minions)) >= len(minions):\n # All minions have returned, break out of the loop\n break\n continue\n # Then event system timeout was reached and nothing was returned\n if len(found.intersection(minions)) >= len(minions):\n # All minions have returned, break out of the loop\n break\n if int(time.time()) > timeout_at:\n if verbose or show_timeout:\n if self.opts.get('minion_data_cache', False) \\\n or tgt_type in ('glob', 'pcre', 'list'):\n if len(found) < len(minions):\n fail = sorted(list(minions.difference(found)))\n for minion in fail:\n ret[minion] = {\n 'out': 'no_return',\n 'ret': 'Minion did not return'\n }\n break\n time.sleep(0.01)\n\n self._clean_up_subscriptions(jid)\n return ret\n" ]
class LocalClient(object): ''' The interface used by the :command:`salt` CLI tool on the Salt Master ``LocalClient`` is used to send a command to Salt minions to execute :ref:`execution modules <all-salt.modules>` and return the results to the Salt Master. Importing and using ``LocalClient`` must be done on the same machine as the Salt Master and it must be done using the same user that the Salt Master is running as. (Unless :conf_master:`external_auth` is configured and authentication credentials are included in the execution). .. note:: The LocalClient uses a Tornado IOLoop, this can create issues when using the LocalClient inside an existing IOLoop. If creating the LocalClient in partnership with another IOLoop either create the IOLoop before creating the LocalClient, or when creating the IOLoop use ioloop.current() which will return the ioloop created by LocalClient. .. code-block:: python import salt.client local = salt.client.LocalClient() local.cmd('*', 'test.fib', [10]) ''' def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, 'master'), mopts=None, skip_perm_errors=False, io_loop=None, keep_loop=False, auto_reconnect=False): ''' :param IOLoop io_loop: io_loop used for events. Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. 
''' if mopts: self.opts = mopts else: if os.path.isdir(c_path): log.warning( '%s expects a file path not a directory path(%s) to ' 'its \'c_path\' keyword argument', self.__class__.__name__, c_path ) self.opts = salt.config.client_config(c_path) self.serial = salt.payload.Serial(self.opts) self.salt_user = salt.utils.user.get_specific_user() self.skip_perm_errors = skip_perm_errors self.key = self.__read_master_key() self.auto_reconnect = auto_reconnect self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False, io_loop=io_loop, keep_loop=keep_loop) self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils) self.returners = salt.loader.returners(self.opts, self.functions) def __read_master_key(self): ''' Read in the rotating master authentication key ''' key_user = self.salt_user if key_user == 'root': if self.opts.get('user', 'root') != 'root': key_user = self.opts.get('user', 'root') if key_user.startswith('sudo_'): key_user = self.opts.get('user', 'root') if salt.utils.platform.is_windows(): # The username may contain '\' if it is in Windows # 'DOMAIN\username' format. Fix this for the keyfile path. 
key_user = key_user.replace('\\', '_') keyfile = os.path.join(self.opts['cachedir'], '.{0}_key'.format(key_user)) try: # Make sure all key parent directories are accessible salt.utils.verify.check_path_traversal(self.opts['cachedir'], key_user, self.skip_perm_errors) with salt.utils.files.fopen(keyfile, 'r') as key: return salt.utils.stringutils.to_unicode(key.read()) except (OSError, IOError, SaltClientError): # Fall back to eauth return '' def _convert_range_to_list(self, tgt): ''' convert a seco.range range into a list target ''' range_ = seco.range.Range(self.opts['range_server']) try: return range_.expand(tgt) except seco.range.RangeException as err: print('Range server exception: {0}'.format(err)) return [] def _get_timeout(self, timeout): ''' Return the timeout to use ''' if timeout is None: return self.opts['timeout'] if isinstance(timeout, int): return timeout if isinstance(timeout, six.string_types): try: return int(timeout) except ValueError: return self.opts['timeout'] # Looks like the timeout is invalid, use config return self.opts['timeout'] def gather_job_info(self, jid, tgt, tgt_type, listen=True, **kwargs): ''' Return the information about a given job ''' log.debug('Checking whether jid %s is still running', jid) timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) pub_data = self.run_job(tgt, 'saltutil.find_job', arg=[jid], tgt_type=tgt_type, timeout=timeout, listen=listen, **kwargs ) if 'jid' in pub_data: self.event.subscribe(pub_data['jid']) return pub_data def _check_pub_data(self, pub_data, listen=True): ''' Common checks on the pub_data data structure returned from running pub ''' if pub_data == '': # Failed to authenticate, this could be a bunch of things raise EauthAuthenticationError( 'Failed to authenticate! This is most likely because this ' 'user is not permitted to execute commands, but there is a ' 'small possibility that a disk error occurred (check ' 'disk/inode usage).' 
) # Failed to connect to the master and send the pub if 'error' in pub_data: print(pub_data['error']) log.debug('_check_pub_data() error: %s', pub_data['error']) return {} elif 'jid' not in pub_data: return {} if pub_data['jid'] == '0': print('Failed to connect to the Master, ' 'is the Salt Master running?') return {} # If we order masters (via a syndic), don't short circuit if no minions # are found if not self.opts.get('order_masters'): # Check for no minions if not pub_data['minions']: print('No minions matched the target. ' 'No command was sent, no jid was assigned.') return {} # don't install event subscription listeners when the request is asynchronous # and doesn't care. this is important as it will create event leaks otherwise if not listen: return pub_data if self.opts.get('order_masters'): self.event.subscribe('syndic/.*/{0}'.format(pub_data['jid']), 'regex') self.event.subscribe('salt/job/{0}'.format(pub_data['jid'])) return pub_data def run_job( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=False, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = self.pub( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) return self._check_pub_data(pub_data, listen=listen) def gather_minions(self, tgt, expr_form): _res = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form) return _res['minions'] @tornado.gen.coroutine def run_job_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=True, io_loop=None, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job_async('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = yield self.pub_async( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), io_loop=io_loop, listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) raise tornado.gen.Return(self._check_pub_data(pub_data, listen=listen)) def cmd_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', kwarg=None, **kwargs): ''' Asynchronously send a command to connected minions The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A job ID or 0 on failure. .. code-block:: python >>> local.cmd_async('*', 'test.sleep', [300]) '20131219215921857715' ''' pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, jid=jid, kwarg=kwarg, listen=False, **kwargs) try: return pub_data['jid'] except KeyError: return 0 def cmd_subset( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, sub=3, cli=False, progress=False, full_return=False, **kwargs): ''' Execute a command on a random subset of the targeted systems The function signature is the same as :py:meth:`cmd` with the following exceptions. :param sub: The number of systems to execute on :param cli: When this is set to True, a generator is returned, otherwise a dictionary of the minion returns is returned .. 
code-block:: python >>> SLC.cmd_subset('*', 'test.ping', sub=1) {'jerry': True} ''' minion_ret = self.cmd(tgt, 'sys.list_functions', tgt_type=tgt_type, **kwargs) minions = list(minion_ret) random.shuffle(minions) f_tgt = [] for minion in minions: if fun in minion_ret[minion]: f_tgt.append(minion) if len(f_tgt) >= sub: break func = self.cmd if cli: func = self.cmd_cli return func( f_tgt, fun, arg, tgt_type='list', ret=ret, kwarg=kwarg, progress=progress, full_return=full_return, **kwargs) def cmd_batch( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, batch='10%', **kwargs): ''' Iteratively execute a command on subsets of minions at a time The function signature is the same as :py:meth:`cmd` with the following exceptions. :param batch: The batch identifier of systems to execute on :returns: A generator of minion returns .. code-block:: python >>> returns = local.cmd_batch('*', 'state.highstate', batch='10%') >>> for ret in returns: ... print(ret) {'jerry': {...}} {'dave': {...}} {'stewart': {...}} ''' # Late import - not used anywhere else in this file import salt.cli.batch opts = salt.cli.batch.batch_get_opts( tgt, fun, batch, self.opts, arg=arg, tgt_type=tgt_type, ret=ret, kwarg=kwarg, **kwargs) eauth = salt.cli.batch.batch_get_eauth(kwargs) arg = salt.utils.args.parse_input(arg, kwargs=kwarg) opts = {'tgt': tgt, 'fun': fun, 'arg': arg, 'tgt_type': tgt_type, 'ret': ret, 'batch': batch, 'failhard': kwargs.get('failhard', False), 'raw': kwargs.get('raw', False)} if 'timeout' in kwargs: opts['timeout'] = kwargs['timeout'] if 'gather_job_timeout' in kwargs: opts['gather_job_timeout'] = kwargs['gather_job_timeout'] if 'batch_wait' in kwargs: opts['batch_wait'] = int(kwargs['batch_wait']) eauth = {} if 'eauth' in kwargs: eauth['eauth'] = kwargs.pop('eauth') if 'username' in kwargs: eauth['username'] = kwargs.pop('username') if 'password' in kwargs: eauth['password'] = kwargs.pop('password') if 'token' in kwargs: eauth['token'] = kwargs.pop('token') for key, 
val in six.iteritems(self.opts): if key not in opts: opts[key] = val batch = salt.cli.batch.Batch(opts, eauth=eauth, quiet=True) for ret in batch.run(): yield ret def cmd(self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', jid='', full_return=False, kwarg=None, **kwargs): ''' Synchronously execute a command on targeted minions The cmd method will execute and wait for the timeout period for all minions to reply, then it will return all minion data at once. .. code-block:: python >>> import salt.client >>> local = salt.client.LocalClient() >>> local.cmd('*', 'cmd.run', ['whoami']) {'jerry': 'root'} With extra keyword arguments for the command function to be run: .. code-block:: python local.cmd('*', 'test.arg', ['arg1', 'arg2'], kwarg={'foo': 'bar'}) Compound commands can be used for multiple executions in a single publish. Function names and function arguments are provided in separate lists but the index values must correlate and an empty list must be used if no arguments are required. .. code-block:: python >>> local.cmd('*', [ 'grains.items', 'sys.doc', 'cmd.run', ], [ [], [], ['uptime'], ]) :param tgt: Which minions to target for the execution. Default is shell glob. Modified by the ``tgt_type`` option. :type tgt: string or list :param fun: The module and function to call on the specified minions of the form ``module.function``. For example ``test.ping`` or ``grains.items``. Compound commands Multiple functions may be called in a single publish by passing a list of commands. This can dramatically lower overhead and speed up the application communicating with Salt. This requires that the ``arg`` param is a list of lists. The ``fun`` list and the ``arg`` list must correlate by index meaning a function that does not take arguments must still have a corresponding empty list at the expected index. :type fun: string or list of strings :param arg: A list of arguments to pass to the remote function. 
If the function takes no arguments ``arg`` may be omitted except when executing a compound command. :type arg: list or list-of-lists :param timeout: Seconds to wait after the last minion returns but before all minions return. :param tgt_type: The type of ``tgt``. Allowed values: * ``glob`` - Bash glob completion - Default * ``pcre`` - Perl style regular expression * ``list`` - Python list of hosts * ``grain`` - Match based on a grain comparison * ``grain_pcre`` - Grain comparison with a regex * ``pillar`` - Pillar data comparison * ``pillar_pcre`` - Pillar data comparison with a regex * ``nodegroup`` - Match on nodegroup * ``range`` - Use a Range server for matching * ``compound`` - Pass a compound match string * ``ipcidr`` - Match based on Subnet (CIDR notation) or IPv4 address. .. versionchanged:: 2017.7.0 Renamed from ``expr_form`` to ``tgt_type`` :param ret: The returner to use. The value passed can be single returner, or a comma delimited list of returners to call in order on the minions :param kwarg: A dictionary with keyword arguments for the function. :param full_return: Output the job return only (default) or the full return including exit code and other job metadata. :param kwargs: Optional keyword arguments. Authentication credentials may be passed when using :conf_master:`external_auth`. For example: ``local.cmd('*', 'test.ping', username='saltdev', password='saltdev', eauth='pam')``. Or: ``local.cmd('*', 'test.ping', token='5871821ea51754fdcea8153c1c745433')`` :returns: A dictionary with the result of the execution, keyed by minion ID. A compound command will return a sub-dictionary keyed by function name. 
''' was_listening = self.event.cpub try: pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, timeout, jid, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data ret = {} for fn_ret in self.get_cli_event_returns( pub_data['jid'], pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, **kwargs): if fn_ret: for mid, data in six.iteritems(fn_ret): ret[mid] = (data if full_return else data.get('ret', {})) for failed in list(set(pub_data['minions']) - set(ret)): ret[failed] = False return ret finally: if not was_listening: self.event.close_pub() def cmd_cli( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, progress=False, **kwargs): ''' Used by the :command:`salt` CLI. This method returns minion returns as they come back and attempts to block until all minions return. The function signature is the same as :py:meth:`cmd` with the following exceptions. :param verbose: Print extra information about the running command :returns: A generator ''' was_listening = self.event.cpub if fun.startswith('state.'): ref = {'compound': '-C', 'glob': '', 'grain': '-G', 'grain_pcre': '-P', 'ipcidr': '-S', 'list': '-L', 'nodegroup': '-N', 'pcre': '-E', 'pillar': '-I', 'pillar_pcre': '-J'} if HAS_RANGE: ref['range'] = '-R' if ref[tgt_type].startswith('-'): self.target_data = "{0} '{1}'".format( ref[tgt_type], ','.join(tgt) if isinstance(tgt, list) else tgt) else: self.target_data = ','.join(tgt) if isinstance(tgt, list) else tgt else: self.target_data = '' try: self.pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not self.pub_data: yield self.pub_data else: try: for fn_ret in self.get_cli_event_returns( self.pub_data['jid'], self.pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, verbose, progress, **kwargs): if not fn_ret: continue yield fn_ret except KeyboardInterrupt: exit_msg = ( '\nExiting gracefully on Ctrl-c' '\n' 'This job\'s jid is: {0}\n' 'The 
minions may not have all finished running and any ' 'remaining minions will return upon completion.\n\n' 'To look up the return data for this job later, run the ' 'following command:\n' 'salt-run jobs.lookup_jid {0}'.format(self.pub_data['jid'])) if self.target_data: exit_msg += ( '\n\n' 'To set up the state run to safely exit, run the following command:\n' 'salt {0} state.soft_kill {1}'.format(self.target_data, self.pub_data['jid'])) raise SystemExit(exit_msg) finally: if not was_listening: self.event.close_pub() def cmd_iter( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, **kwargs): ''' Yields the individual minion returns as they come in The function signature is the same as :py:meth:`cmd` with the following exceptions. Normally :py:meth:`cmd_iter` does not yield results for minions that are not connected. If you want it to return results for disconnected minions set `expect_minions=True` in `kwargs`. :return: A generator yielding the individual minion returns .. code-block:: python >>> ret = local.cmd_iter('*', 'test.ping') >>> for i in ret: ... print(i) {'jerry': {'ret': True}} {'dave': {'ret': True}} {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: if kwargs.get('yield_pub_data'): yield pub_data for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=self._get_timeout(timeout), tgt=tgt, tgt_type=tgt_type, **kwargs): if not fn_ret: continue yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_iter_no_block( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, show_jid=False, verbose=False, **kwargs): ''' Yields the individual minion returns as they come in, or None when no returns are available. 
The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A generator yielding the individual minion returns, or None when no returns are available. This allows for actions to be injected in between minion returns. .. code-block:: python >>> ret = local.cmd_iter_no_block('*', 'test.ping') >>> for i in ret: ... print(i) None {'jerry': {'ret': True}} {'dave': {'ret': True}} None {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=timeout, tgt=tgt, tgt_type=tgt_type, block=False, **kwargs): if fn_ret and any([show_jid, verbose]): for minion in fn_ret: fn_ret[minion]['jid'] = pub_data['jid'] yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def get_cli_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_jid=False, **kwargs): ''' Starts a watcher looking at the return data for a specified JID :returns: all of the information for the JID ''' if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] fret = {} # make sure the minions is a set (since we do set operations on it) minions = set(minions) found = set() # start this before the cache lookup-- in case new stuff comes in event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) # get the info from the cache ret = self.get_cache_returns(jid) if ret != {}: found.update(set(ret)) yield ret # if you have all the returns, stop if len(found.intersection(minions)) >= len(minions): raise StopIteration() # otherwise, get them from the event system for event in event_iter: if event != {}: found.update(set(event)) 
yield event if len(found.intersection(minions)) >= len(minions): self._clean_up_subscriptions(jid) raise StopIteration() # TODO: tests!! def get_returns_no_block( self, tag, match_type=None): ''' Raw function to just return events of jid excluding timeout logic Yield either the raw event data or None Pass a list of additional regular expressions as `tags_regex` to search the event bus for non-return data, such as minion lists returned from syndics. ''' while True: raw = self.event.get_event(wait=0.01, tag=tag, match_type=match_type, full=True, no_block=True, auto_reconnect=self.auto_reconnect) yield raw def get_iter_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', expect_minions=False, block=True, **kwargs): ''' Watch the event system and return job data as it comes in :returns: all of the information for the JID ''' if not isinstance(minions, set): if isinstance(minions, six.string_types): minions = set([minions]) elif isinstance(minions, (list, tuple)): minions = set(list(minions)) if timeout is None: timeout = self.opts['timeout'] gather_job_timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) start = int(time.time()) # timeouts per minion, id_ -> timeout time minion_timeouts = {} found = set() missing = set() # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() except Exception as exc: log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG) # Wait for the hosts to check in last_time = False # iterator for this job's return if self.opts['order_masters']: # If we are a MoM, we need to gather expected minions from downstreams masters. 
ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex') else: ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid)) # iterator for the info of this job jinfo_iter = [] # open event jids that need to be un-subscribed from later open_jids = set() timeout_at = time.time() + timeout gather_syndic_wait = time.time() + self.opts['syndic_wait'] # are there still minions running the job out there # start as True so that we ping at least once minions_running = True log.debug( 'get_iter_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) while True: # Process events until timeout is reached or all minions have returned for raw in ret_iter: # if we got None, then there were no events if raw is None: break if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) if 'missing' in raw.get('data', {}): missing.update(raw['data']['missing']) continue if 'return' not in raw['data']: continue if kwargs.get('raw', False): found.add(raw['data']['id']) yield raw else: found.add(raw['data']['id']) ret = {raw['data']['id']: {'ret': raw['data']['return']}} if 'out' in raw['data']: ret[raw['data']['id']]['out'] = raw['data']['out'] if 'retcode' in raw['data']: ret[raw['data']['id']]['retcode'] = raw['data']['retcode'] if 'jid' in raw['data']: ret[raw['data']['id']]['jid'] = raw['data']['jid'] if kwargs.get('_cmd_meta', False): ret[raw['data']['id']].update(raw['data']) log.debug('jid %s return from %s', jid, raw['data']['id']) yield ret # if we have all of the returns (and we aren't a syndic), no need for anything fancy if len(found.intersection(minions)) >= len(minions) and not self.opts['order_masters']: # All minions have returned, break out of the loop log.debug('jid %s found all minions %s', jid, found) break elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']: if len(found) >= len(minions) and len(minions) > 0 and time.time() > 
gather_syndic_wait: # There were some minions to find and we found them # However, this does not imply that *all* masters have yet responded with expected minion lists. # Therefore, continue to wait up to the syndic_wait period (calculated in gather_syndic_wait) to see # if additional lower-level masters deliver their lists of expected # minions. break # If we get here we may not have gathered the minion list yet. Keep waiting # for all lower-level masters to respond with their minion lists # let start the timeouts for all remaining minions for id_ in minions - found: # if we have a new minion in the list, make sure it has a timeout if id_ not in minion_timeouts: minion_timeouts[id_] = time.time() + timeout # if the jinfo has timed out and some minions are still running the job # re-do the ping if time.time() > timeout_at and minions_running: # since this is a new ping, no one has responded yet jinfo = self.gather_job_info(jid, list(minions - found), 'list', **kwargs) minions_running = False # if we weren't assigned any jid that means the master thinks # we have nothing to send if 'jid' not in jinfo: jinfo_iter = [] else: jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid'])) timeout_at = time.time() + gather_job_timeout # if you are a syndic, wait a little longer if self.opts['order_masters']: timeout_at += self.opts.get('syndic_wait', 1) # check for minions that are running the job still for raw in jinfo_iter: # if there are no more events, lets stop waiting for the jinfo if raw is None: break try: if raw['data']['retcode'] > 0: log.error('saltutil returning errors on minion %s', raw['data']['id']) minions.remove(raw['data']['id']) break except KeyError as exc: # This is a safe pass. We're just using the try/except to # avoid having to deep-check for keys. missing_key = exc.__str__().strip('\'"') if missing_key == 'retcode': log.debug('retcode missing from client return') else: log.debug( 'Passing on saltutil error. 
Key \'%s\' missing ' 'from client return. This may be an error in ' 'the client.', missing_key ) # Keep track of the jid events to unsubscribe from later open_jids.add(jinfo['jid']) # TODO: move to a library?? if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue if 'syndic' in raw.get('data', {}): minions.update(raw['syndic']) continue if 'return' not in raw.get('data', {}): continue # if the job isn't running there anymore... don't count if raw['data']['return'] == {}: continue # if the minion throws an exception containing the word "return" # the master will try to handle the string as a dict in the next # step. Check if we have a string, log the issue and continue. if isinstance(raw['data']['return'], six.string_types): log.error("unexpected return from minion: %s", raw) continue if 'return' in raw['data']['return'] and \ raw['data']['return']['return'] == {}: continue # if we didn't originally target the minion, lets add it to the list if raw['data']['id'] not in minions: minions.add(raw['data']['id']) # update this minion's timeout, as long as the job is still running minion_timeouts[raw['data']['id']] = time.time() + timeout # a minion returned, so we know its running somewhere minions_running = True # if we have hit gather_job_timeout (after firing the job) AND # if we have hit all minion timeouts, lets call it now = time.time() # if we have finished waiting, and no minions are running the job # then we need to see if each minion has timedout done = (now > timeout_at) and not minions_running if done: # if all minions have timeod out for id_ in minions - found: if now < minion_timeouts[id_]: done = False break if done: break # don't spin if block: time.sleep(0.01) else: yield # If there are any remaining open events, clean them up. 
if open_jids: for jid in open_jids: self.event.unsubscribe(jid) if expect_minions: for minion in list((minions - found)): yield {minion: {'failed': True}} # Filter out any minions marked as missing for which we received # returns (prevents false events sent due to higher-level masters not # knowing about lower-level minions). missing -= found # Report on missing minions if missing: for minion in missing: yield {minion: {'failed': True}} def get_returns( self, jid, minions, timeout=None): ''' Get the returns for the command line interface via the event system ''' minions = set(minions) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout log.debug( 'get_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Master job cache returner [{0}] failed to verify jid. 
' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: time_left = timeout_at - int(time.time()) wait = max(1, time_left) raw = self.event.get_event(wait, jid, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: found.add(raw['id']) ret[raw['id']] = raw['return'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break if int(time.time()) > timeout_at: log.info( 'jid %s minions %s did not return in time', jid, (minions - found) ) break time.sleep(0.01) return ret def get_full_returns(self, jid, minions, timeout=None): ''' This method starts off a watcher looking at the return data for a specified jid, it returns all of the information for the jid ''' # TODO: change this from ret to return... or the other way. # Its inconsistent, we should pick one ret = {} # create the iterator-- since we want to get anyone in the middle event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Returner {0} could not fetch jid data. 
' 'Exception details: {1}'.format( self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # if we have all the minion returns, lets just return if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise lets use the listener we created above to get the rest for event_ret in event_iter: # if nothing in the event_ret, skip if event_ret == {}: time.sleep(0.02) continue for minion, m_data in six.iteritems(event_ret): if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # are we done yet? if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise we hit the timeout, return what we have return ret def get_cache_returns(self, jid): ''' Execute a single pass to gather the contents of the job cache ''' ret = {} try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Could not examine master job cache. ' 'Error occurred in {0} returner. 
' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data return ret def get_cli_static_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_timeout=False, show_jid=False): ''' Get the returns for the command line interface via the event system ''' log.trace('entered - function get_cli_static_event_returns()') minions = set(minions) if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Load could not be retrieved from ' 'returner {0}. 
Exception details: {1}'.format( self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: # Process events until timeout is reached or all minions have returned time_left = timeout_at - int(time.time()) # Wait 0 == forever, use a minimum of 1s wait = max(1, time_left) jid_tag = 'salt/job/{0}'.format(jid) raw = self.event.get_event(wait, jid_tag, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue found.add(raw['id']) ret[raw['id']] = {'ret': raw['return']} ret[raw['id']]['success'] = raw.get('success', False) if 'out' in raw: ret[raw['id']]['out'] = raw['out'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break if int(time.time()) > timeout_at: if verbose or show_timeout: if self.opts.get('minion_data_cache', False) \ or tgt_type in ('glob', 'pcre', 'list'): if len(found) < len(minions): fail = sorted(list(minions.difference(found))) for minion in fail: ret[minion] = { 'out': 'no_return', 'ret': 'Minion did not return' } break time.sleep(0.01) self._clean_up_subscriptions(jid) return ret def get_cli_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, progress=False, show_timeout=False, show_jid=False, **kwargs): ''' Get the returns for the command line interface via the event system ''' log.trace('func get_cli_event_returns()') if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) # lazy load the connected minions connected_minions = None return_count = 0 for ret in self.get_iter_returns(jid, minions, timeout=timeout, tgt=tgt, tgt_type=tgt_type, # (gtmanfred) 
expect_minions is popped here incase it is passed from a client # call. If this is not popped, then it would be passed twice to # get_iter_returns. expect_minions=(kwargs.pop('expect_minions', False) or verbose or show_timeout), **kwargs ): log.debug('return event: %s', ret) return_count = return_count + 1 if progress: for id_, min_ret in six.iteritems(ret): if not min_ret.get('failed') is True: yield {'minion_count': len(minions), 'return_count': return_count} # replace the return structure for missing minions for id_, min_ret in six.iteritems(ret): if min_ret.get('failed') is True: if connected_minions is None: connected_minions = salt.utils.minions.CkMinions(self.opts).connected_ids() if self.opts['minion_data_cache'] \ and salt.cache.factory(self.opts).contains('minions/{0}'.format(id_), 'data') \ and connected_minions \ and id_ not in connected_minions: yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [Not connected]', 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: # don't report syndics as unresponsive minions if not os.path.exists(os.path.join(self.opts['syndic_dir'], id_)): yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [No response]' '\nThe minions may not have all finished running and any ' 'remaining minions will return upon completion. To look ' 'up the return data for this job later, run the following ' 'command:\n\n' 'salt-run jobs.lookup_jid {0}'.format(jid), 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: yield {id_: min_ret} self._clean_up_subscriptions(jid) def get_event_iter_returns(self, jid, minions, timeout=None): ''' Gather the return data from the event system, break hard when timeout is reached. 
''' log.trace('entered - function get_event_iter_returns()') if timeout is None: timeout = self.opts['timeout'] timeout_at = time.time() + timeout found = set() # Check to see if the jid is real, if not return the empty dict if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() # Wait for the hosts to check in while True: raw = self.event.get_event(timeout, auto_reconnect=self.auto_reconnect) if raw is None or time.time() > timeout_at: # Timeout reached break if 'minions' in raw.get('data', {}): continue try: found.add(raw['id']) ret = {raw['id']: {'ret': raw['return']}} except KeyError: # Ignore other erroneous messages continue if 'out' in raw: ret[raw['id']]['out'] = raw['out'] yield ret time.sleep(0.02) def _prep_pub(self, tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs): ''' Set up the payload_kwargs to be sent down to the master ''' if tgt_type == 'nodegroup': if tgt not in self.opts['nodegroups']: conf_file = self.opts.get( 'conf_file', 'the master config file' ) raise SaltInvocationError( 'Node group {0} unavailable in {1}'.format( tgt, conf_file ) ) tgt = salt.utils.minions.nodegroup_comp(tgt, self.opts['nodegroups']) tgt_type = 'compound' # Convert a range expression to a list of nodes and change expression # form to list if tgt_type == 'range' and HAS_RANGE: tgt = self._convert_range_to_list(tgt) tgt_type = 'list' # If an external job cache is specified add it to the ret list if self.opts.get('ext_job_cache'): if ret: ret += ',{0}'.format(self.opts['ext_job_cache']) else: ret = self.opts['ext_job_cache'] # format the payload - make a function that does this in the payload # module # Generate the standard keyword args to feed to format_payload payload_kwargs = {'cmd': 'publish', 'tgt': tgt, 'fun': fun, 'arg': arg, 'key': self.key, 'tgt_type': tgt_type, 'ret': ret, 'jid': jid} # if kwargs are passed, pack them. 
if kwargs: payload_kwargs['kwargs'] = kwargs # If we have a salt user, add it to the payload if self.opts['syndic_master'] and 'user' in kwargs: payload_kwargs['user'] = kwargs['user'] elif self.salt_user: payload_kwargs['user'] = self.salt_user # If we're a syndication master, pass the timeout if self.opts['order_masters']: payload_kwargs['to'] = timeout return payload_kwargs def pub(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, listen=False, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.ReqChannel.factory(self.opts, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. 
# If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError as err: log.error(err) raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: return payload self.key = key payload_kwargs['key'] = self.key payload = channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: return payload # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() return {'jid': payload['load']['jid'], 'minions': payload['load']['minions']} @tornado.gen.coroutine def pub_async(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, io_loop=None, listen=True, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. 
fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.AsyncReqChannel.factory(self.opts, io_loop=io_loop, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. # If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = yield channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError: raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! 
Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: raise tornado.gen.Return(payload) self.key = key payload_kwargs['key'] = self.key payload = yield channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: raise tornado.gen.Return(payload) # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() raise tornado.gen.Return({'jid': payload['load']['jid'], 'minions': payload['load']['minions']}) def __del__(self): # This IS really necessary! # When running tests, if self.events is not destroyed, we leak 2 # threads per test case which uses self.client if hasattr(self, 'event'): # The call below will take care of calling 'self.event.destroy()' del self.event def _clean_up_subscriptions(self, job_id): if self.opts.get('order_masters'): self.event.unsubscribe('syndic/.*/{0}'.format(job_id), 'regex') self.event.unsubscribe('salt/job/{0}'.format(job_id))
saltstack/salt
salt/client/__init__.py
LocalClient.get_cli_returns
python
def get_cli_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_jid=False, **kwargs): ''' Starts a watcher looking at the return data for a specified JID :returns: all of the information for the JID ''' if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] fret = {} # make sure the minions is a set (since we do set operations on it) minions = set(minions) found = set() # start this before the cache lookup-- in case new stuff comes in event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) # get the info from the cache ret = self.get_cache_returns(jid) if ret != {}: found.update(set(ret)) yield ret # if you have all the returns, stop if len(found.intersection(minions)) >= len(minions): raise StopIteration() # otherwise, get them from the event system for event in event_iter: if event != {}: found.update(set(event)) yield event if len(found.intersection(minions)) >= len(minions): self._clean_up_subscriptions(jid) raise StopIteration()
Starts a watcher looking at the return data for a specified JID :returns: all of the information for the JID
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L995-L1043
[ "def get_cache_returns(self, jid):\n '''\n Execute a single pass to gather the contents of the job cache\n '''\n ret = {}\n\n try:\n data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid)\n except Exception as exc:\n raise SaltClientError('Could not examine master job cache. '\n 'Error occurred in {0} returner. '\n 'Exception details: {1}'.format(self.opts['master_job_cache'],\n exc))\n for minion in data:\n m_data = {}\n if 'return' in data[minion]:\n m_data['ret'] = data[minion].get('return')\n else:\n m_data['ret'] = data[minion].get('return')\n if 'out' in data[minion]:\n m_data['out'] = data[minion]['out']\n if minion in ret:\n ret[minion].update(m_data)\n else:\n ret[minion] = m_data\n\n return ret\n", "def get_event_iter_returns(self, jid, minions, timeout=None):\n '''\n Gather the return data from the event system, break hard when timeout\n is reached.\n '''\n log.trace('entered - function get_event_iter_returns()')\n if timeout is None:\n timeout = self.opts['timeout']\n\n timeout_at = time.time() + timeout\n\n found = set()\n # Check to see if the jid is real, if not return the empty dict\n if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}:\n log.warning('jid does not exist')\n yield {}\n # stop the iteration, since the jid is invalid\n raise StopIteration()\n # Wait for the hosts to check in\n while True:\n raw = self.event.get_event(timeout, auto_reconnect=self.auto_reconnect)\n if raw is None or time.time() > timeout_at:\n # Timeout reached\n break\n if 'minions' in raw.get('data', {}):\n continue\n try:\n found.add(raw['id'])\n ret = {raw['id']: {'ret': raw['return']}}\n except KeyError:\n # Ignore other erroneous messages\n continue\n if 'out' in raw:\n ret[raw['id']]['out'] = raw['out']\n yield ret\n time.sleep(0.02)\n", "def _clean_up_subscriptions(self, job_id):\n if self.opts.get('order_masters'):\n self.event.unsubscribe('syndic/.*/{0}'.format(job_id), 'regex')\n 
self.event.unsubscribe('salt/job/{0}'.format(job_id))\n" ]
class LocalClient(object): ''' The interface used by the :command:`salt` CLI tool on the Salt Master ``LocalClient`` is used to send a command to Salt minions to execute :ref:`execution modules <all-salt.modules>` and return the results to the Salt Master. Importing and using ``LocalClient`` must be done on the same machine as the Salt Master and it must be done using the same user that the Salt Master is running as. (Unless :conf_master:`external_auth` is configured and authentication credentials are included in the execution). .. note:: The LocalClient uses a Tornado IOLoop, this can create issues when using the LocalClient inside an existing IOLoop. If creating the LocalClient in partnership with another IOLoop either create the IOLoop before creating the LocalClient, or when creating the IOLoop use ioloop.current() which will return the ioloop created by LocalClient. .. code-block:: python import salt.client local = salt.client.LocalClient() local.cmd('*', 'test.fib', [10]) ''' def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, 'master'), mopts=None, skip_perm_errors=False, io_loop=None, keep_loop=False, auto_reconnect=False): ''' :param IOLoop io_loop: io_loop used for events. Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. 
''' if mopts: self.opts = mopts else: if os.path.isdir(c_path): log.warning( '%s expects a file path not a directory path(%s) to ' 'its \'c_path\' keyword argument', self.__class__.__name__, c_path ) self.opts = salt.config.client_config(c_path) self.serial = salt.payload.Serial(self.opts) self.salt_user = salt.utils.user.get_specific_user() self.skip_perm_errors = skip_perm_errors self.key = self.__read_master_key() self.auto_reconnect = auto_reconnect self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False, io_loop=io_loop, keep_loop=keep_loop) self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils) self.returners = salt.loader.returners(self.opts, self.functions) def __read_master_key(self): ''' Read in the rotating master authentication key ''' key_user = self.salt_user if key_user == 'root': if self.opts.get('user', 'root') != 'root': key_user = self.opts.get('user', 'root') if key_user.startswith('sudo_'): key_user = self.opts.get('user', 'root') if salt.utils.platform.is_windows(): # The username may contain '\' if it is in Windows # 'DOMAIN\username' format. Fix this for the keyfile path. 
key_user = key_user.replace('\\', '_') keyfile = os.path.join(self.opts['cachedir'], '.{0}_key'.format(key_user)) try: # Make sure all key parent directories are accessible salt.utils.verify.check_path_traversal(self.opts['cachedir'], key_user, self.skip_perm_errors) with salt.utils.files.fopen(keyfile, 'r') as key: return salt.utils.stringutils.to_unicode(key.read()) except (OSError, IOError, SaltClientError): # Fall back to eauth return '' def _convert_range_to_list(self, tgt): ''' convert a seco.range range into a list target ''' range_ = seco.range.Range(self.opts['range_server']) try: return range_.expand(tgt) except seco.range.RangeException as err: print('Range server exception: {0}'.format(err)) return [] def _get_timeout(self, timeout): ''' Return the timeout to use ''' if timeout is None: return self.opts['timeout'] if isinstance(timeout, int): return timeout if isinstance(timeout, six.string_types): try: return int(timeout) except ValueError: return self.opts['timeout'] # Looks like the timeout is invalid, use config return self.opts['timeout'] def gather_job_info(self, jid, tgt, tgt_type, listen=True, **kwargs): ''' Return the information about a given job ''' log.debug('Checking whether jid %s is still running', jid) timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) pub_data = self.run_job(tgt, 'saltutil.find_job', arg=[jid], tgt_type=tgt_type, timeout=timeout, listen=listen, **kwargs ) if 'jid' in pub_data: self.event.subscribe(pub_data['jid']) return pub_data def _check_pub_data(self, pub_data, listen=True): ''' Common checks on the pub_data data structure returned from running pub ''' if pub_data == '': # Failed to authenticate, this could be a bunch of things raise EauthAuthenticationError( 'Failed to authenticate! This is most likely because this ' 'user is not permitted to execute commands, but there is a ' 'small possibility that a disk error occurred (check ' 'disk/inode usage).' 
) # Failed to connect to the master and send the pub if 'error' in pub_data: print(pub_data['error']) log.debug('_check_pub_data() error: %s', pub_data['error']) return {} elif 'jid' not in pub_data: return {} if pub_data['jid'] == '0': print('Failed to connect to the Master, ' 'is the Salt Master running?') return {} # If we order masters (via a syndic), don't short circuit if no minions # are found if not self.opts.get('order_masters'): # Check for no minions if not pub_data['minions']: print('No minions matched the target. ' 'No command was sent, no jid was assigned.') return {} # don't install event subscription listeners when the request is asynchronous # and doesn't care. this is important as it will create event leaks otherwise if not listen: return pub_data if self.opts.get('order_masters'): self.event.subscribe('syndic/.*/{0}'.format(pub_data['jid']), 'regex') self.event.subscribe('salt/job/{0}'.format(pub_data['jid'])) return pub_data def run_job( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=False, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = self.pub( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) return self._check_pub_data(pub_data, listen=listen) def gather_minions(self, tgt, expr_form): _res = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form) return _res['minions'] @tornado.gen.coroutine def run_job_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=True, io_loop=None, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job_async('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = yield self.pub_async( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), io_loop=io_loop, listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) raise tornado.gen.Return(self._check_pub_data(pub_data, listen=listen)) def cmd_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', kwarg=None, **kwargs): ''' Asynchronously send a command to connected minions The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A job ID or 0 on failure. .. code-block:: python >>> local.cmd_async('*', 'test.sleep', [300]) '20131219215921857715' ''' pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, jid=jid, kwarg=kwarg, listen=False, **kwargs) try: return pub_data['jid'] except KeyError: return 0 def cmd_subset( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, sub=3, cli=False, progress=False, full_return=False, **kwargs): ''' Execute a command on a random subset of the targeted systems The function signature is the same as :py:meth:`cmd` with the following exceptions. :param sub: The number of systems to execute on :param cli: When this is set to True, a generator is returned, otherwise a dictionary of the minion returns is returned .. 
code-block:: python >>> SLC.cmd_subset('*', 'test.ping', sub=1) {'jerry': True} ''' minion_ret = self.cmd(tgt, 'sys.list_functions', tgt_type=tgt_type, **kwargs) minions = list(minion_ret) random.shuffle(minions) f_tgt = [] for minion in minions: if fun in minion_ret[minion]: f_tgt.append(minion) if len(f_tgt) >= sub: break func = self.cmd if cli: func = self.cmd_cli return func( f_tgt, fun, arg, tgt_type='list', ret=ret, kwarg=kwarg, progress=progress, full_return=full_return, **kwargs) def cmd_batch( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, batch='10%', **kwargs): ''' Iteratively execute a command on subsets of minions at a time The function signature is the same as :py:meth:`cmd` with the following exceptions. :param batch: The batch identifier of systems to execute on :returns: A generator of minion returns .. code-block:: python >>> returns = local.cmd_batch('*', 'state.highstate', batch='10%') >>> for ret in returns: ... print(ret) {'jerry': {...}} {'dave': {...}} {'stewart': {...}} ''' # Late import - not used anywhere else in this file import salt.cli.batch opts = salt.cli.batch.batch_get_opts( tgt, fun, batch, self.opts, arg=arg, tgt_type=tgt_type, ret=ret, kwarg=kwarg, **kwargs) eauth = salt.cli.batch.batch_get_eauth(kwargs) arg = salt.utils.args.parse_input(arg, kwargs=kwarg) opts = {'tgt': tgt, 'fun': fun, 'arg': arg, 'tgt_type': tgt_type, 'ret': ret, 'batch': batch, 'failhard': kwargs.get('failhard', False), 'raw': kwargs.get('raw', False)} if 'timeout' in kwargs: opts['timeout'] = kwargs['timeout'] if 'gather_job_timeout' in kwargs: opts['gather_job_timeout'] = kwargs['gather_job_timeout'] if 'batch_wait' in kwargs: opts['batch_wait'] = int(kwargs['batch_wait']) eauth = {} if 'eauth' in kwargs: eauth['eauth'] = kwargs.pop('eauth') if 'username' in kwargs: eauth['username'] = kwargs.pop('username') if 'password' in kwargs: eauth['password'] = kwargs.pop('password') if 'token' in kwargs: eauth['token'] = kwargs.pop('token') for key, 
val in six.iteritems(self.opts): if key not in opts: opts[key] = val batch = salt.cli.batch.Batch(opts, eauth=eauth, quiet=True) for ret in batch.run(): yield ret def cmd(self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', jid='', full_return=False, kwarg=None, **kwargs): ''' Synchronously execute a command on targeted minions The cmd method will execute and wait for the timeout period for all minions to reply, then it will return all minion data at once. .. code-block:: python >>> import salt.client >>> local = salt.client.LocalClient() >>> local.cmd('*', 'cmd.run', ['whoami']) {'jerry': 'root'} With extra keyword arguments for the command function to be run: .. code-block:: python local.cmd('*', 'test.arg', ['arg1', 'arg2'], kwarg={'foo': 'bar'}) Compound commands can be used for multiple executions in a single publish. Function names and function arguments are provided in separate lists but the index values must correlate and an empty list must be used if no arguments are required. .. code-block:: python >>> local.cmd('*', [ 'grains.items', 'sys.doc', 'cmd.run', ], [ [], [], ['uptime'], ]) :param tgt: Which minions to target for the execution. Default is shell glob. Modified by the ``tgt_type`` option. :type tgt: string or list :param fun: The module and function to call on the specified minions of the form ``module.function``. For example ``test.ping`` or ``grains.items``. Compound commands Multiple functions may be called in a single publish by passing a list of commands. This can dramatically lower overhead and speed up the application communicating with Salt. This requires that the ``arg`` param is a list of lists. The ``fun`` list and the ``arg`` list must correlate by index meaning a function that does not take arguments must still have a corresponding empty list at the expected index. :type fun: string or list of strings :param arg: A list of arguments to pass to the remote function. 
If the function takes no arguments ``arg`` may be omitted except when executing a compound command. :type arg: list or list-of-lists :param timeout: Seconds to wait after the last minion returns but before all minions return. :param tgt_type: The type of ``tgt``. Allowed values: * ``glob`` - Bash glob completion - Default * ``pcre`` - Perl style regular expression * ``list`` - Python list of hosts * ``grain`` - Match based on a grain comparison * ``grain_pcre`` - Grain comparison with a regex * ``pillar`` - Pillar data comparison * ``pillar_pcre`` - Pillar data comparison with a regex * ``nodegroup`` - Match on nodegroup * ``range`` - Use a Range server for matching * ``compound`` - Pass a compound match string * ``ipcidr`` - Match based on Subnet (CIDR notation) or IPv4 address. .. versionchanged:: 2017.7.0 Renamed from ``expr_form`` to ``tgt_type`` :param ret: The returner to use. The value passed can be single returner, or a comma delimited list of returners to call in order on the minions :param kwarg: A dictionary with keyword arguments for the function. :param full_return: Output the job return only (default) or the full return including exit code and other job metadata. :param kwargs: Optional keyword arguments. Authentication credentials may be passed when using :conf_master:`external_auth`. For example: ``local.cmd('*', 'test.ping', username='saltdev', password='saltdev', eauth='pam')``. Or: ``local.cmd('*', 'test.ping', token='5871821ea51754fdcea8153c1c745433')`` :returns: A dictionary with the result of the execution, keyed by minion ID. A compound command will return a sub-dictionary keyed by function name. 
''' was_listening = self.event.cpub try: pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, timeout, jid, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data ret = {} for fn_ret in self.get_cli_event_returns( pub_data['jid'], pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, **kwargs): if fn_ret: for mid, data in six.iteritems(fn_ret): ret[mid] = (data if full_return else data.get('ret', {})) for failed in list(set(pub_data['minions']) - set(ret)): ret[failed] = False return ret finally: if not was_listening: self.event.close_pub() def cmd_cli( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, progress=False, **kwargs): ''' Used by the :command:`salt` CLI. This method returns minion returns as they come back and attempts to block until all minions return. The function signature is the same as :py:meth:`cmd` with the following exceptions. :param verbose: Print extra information about the running command :returns: A generator ''' was_listening = self.event.cpub if fun.startswith('state.'): ref = {'compound': '-C', 'glob': '', 'grain': '-G', 'grain_pcre': '-P', 'ipcidr': '-S', 'list': '-L', 'nodegroup': '-N', 'pcre': '-E', 'pillar': '-I', 'pillar_pcre': '-J'} if HAS_RANGE: ref['range'] = '-R' if ref[tgt_type].startswith('-'): self.target_data = "{0} '{1}'".format( ref[tgt_type], ','.join(tgt) if isinstance(tgt, list) else tgt) else: self.target_data = ','.join(tgt) if isinstance(tgt, list) else tgt else: self.target_data = '' try: self.pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not self.pub_data: yield self.pub_data else: try: for fn_ret in self.get_cli_event_returns( self.pub_data['jid'], self.pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, verbose, progress, **kwargs): if not fn_ret: continue yield fn_ret except KeyboardInterrupt: exit_msg = ( '\nExiting gracefully on Ctrl-c' '\n' 'This job\'s jid is: {0}\n' 'The 
minions may not have all finished running and any ' 'remaining minions will return upon completion.\n\n' 'To look up the return data for this job later, run the ' 'following command:\n' 'salt-run jobs.lookup_jid {0}'.format(self.pub_data['jid'])) if self.target_data: exit_msg += ( '\n\n' 'To set up the state run to safely exit, run the following command:\n' 'salt {0} state.soft_kill {1}'.format(self.target_data, self.pub_data['jid'])) raise SystemExit(exit_msg) finally: if not was_listening: self.event.close_pub() def cmd_iter( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, **kwargs): ''' Yields the individual minion returns as they come in The function signature is the same as :py:meth:`cmd` with the following exceptions. Normally :py:meth:`cmd_iter` does not yield results for minions that are not connected. If you want it to return results for disconnected minions set `expect_minions=True` in `kwargs`. :return: A generator yielding the individual minion returns .. code-block:: python >>> ret = local.cmd_iter('*', 'test.ping') >>> for i in ret: ... print(i) {'jerry': {'ret': True}} {'dave': {'ret': True}} {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: if kwargs.get('yield_pub_data'): yield pub_data for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=self._get_timeout(timeout), tgt=tgt, tgt_type=tgt_type, **kwargs): if not fn_ret: continue yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_iter_no_block( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, show_jid=False, verbose=False, **kwargs): ''' Yields the individual minion returns as they come in, or None when no returns are available. 
The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A generator yielding the individual minion returns, or None when no returns are available. This allows for actions to be injected in between minion returns. .. code-block:: python >>> ret = local.cmd_iter_no_block('*', 'test.ping') >>> for i in ret: ... print(i) None {'jerry': {'ret': True}} {'dave': {'ret': True}} None {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=timeout, tgt=tgt, tgt_type=tgt_type, block=False, **kwargs): if fn_ret and any([show_jid, verbose]): for minion in fn_ret: fn_ret[minion]['jid'] = pub_data['jid'] yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_full_return( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, **kwargs): ''' Execute a salt command and return ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data return (self.get_cli_static_event_returns(pub_data['jid'], pub_data['minions'], timeout, tgt, tgt_type, verbose)) finally: if not was_listening: self.event.close_pub() # TODO: tests!! def get_returns_no_block( self, tag, match_type=None): ''' Raw function to just return events of jid excluding timeout logic Yield either the raw event data or None Pass a list of additional regular expressions as `tags_regex` to search the event bus for non-return data, such as minion lists returned from syndics. 
''' while True: raw = self.event.get_event(wait=0.01, tag=tag, match_type=match_type, full=True, no_block=True, auto_reconnect=self.auto_reconnect) yield raw def get_iter_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', expect_minions=False, block=True, **kwargs): ''' Watch the event system and return job data as it comes in :returns: all of the information for the JID ''' if not isinstance(minions, set): if isinstance(minions, six.string_types): minions = set([minions]) elif isinstance(minions, (list, tuple)): minions = set(list(minions)) if timeout is None: timeout = self.opts['timeout'] gather_job_timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) start = int(time.time()) # timeouts per minion, id_ -> timeout time minion_timeouts = {} found = set() missing = set() # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() except Exception as exc: log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG) # Wait for the hosts to check in last_time = False # iterator for this job's return if self.opts['order_masters']: # If we are a MoM, we need to gather expected minions from downstreams masters. 
ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex') else: ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid)) # iterator for the info of this job jinfo_iter = [] # open event jids that need to be un-subscribed from later open_jids = set() timeout_at = time.time() + timeout gather_syndic_wait = time.time() + self.opts['syndic_wait'] # are there still minions running the job out there # start as True so that we ping at least once minions_running = True log.debug( 'get_iter_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) while True: # Process events until timeout is reached or all minions have returned for raw in ret_iter: # if we got None, then there were no events if raw is None: break if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) if 'missing' in raw.get('data', {}): missing.update(raw['data']['missing']) continue if 'return' not in raw['data']: continue if kwargs.get('raw', False): found.add(raw['data']['id']) yield raw else: found.add(raw['data']['id']) ret = {raw['data']['id']: {'ret': raw['data']['return']}} if 'out' in raw['data']: ret[raw['data']['id']]['out'] = raw['data']['out'] if 'retcode' in raw['data']: ret[raw['data']['id']]['retcode'] = raw['data']['retcode'] if 'jid' in raw['data']: ret[raw['data']['id']]['jid'] = raw['data']['jid'] if kwargs.get('_cmd_meta', False): ret[raw['data']['id']].update(raw['data']) log.debug('jid %s return from %s', jid, raw['data']['id']) yield ret # if we have all of the returns (and we aren't a syndic), no need for anything fancy if len(found.intersection(minions)) >= len(minions) and not self.opts['order_masters']: # All minions have returned, break out of the loop log.debug('jid %s found all minions %s', jid, found) break elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']: if len(found) >= len(minions) and len(minions) > 0 and time.time() > 
gather_syndic_wait: # There were some minions to find and we found them # However, this does not imply that *all* masters have yet responded with expected minion lists. # Therefore, continue to wait up to the syndic_wait period (calculated in gather_syndic_wait) to see # if additional lower-level masters deliver their lists of expected # minions. break # If we get here we may not have gathered the minion list yet. Keep waiting # for all lower-level masters to respond with their minion lists # let start the timeouts for all remaining minions for id_ in minions - found: # if we have a new minion in the list, make sure it has a timeout if id_ not in minion_timeouts: minion_timeouts[id_] = time.time() + timeout # if the jinfo has timed out and some minions are still running the job # re-do the ping if time.time() > timeout_at and minions_running: # since this is a new ping, no one has responded yet jinfo = self.gather_job_info(jid, list(minions - found), 'list', **kwargs) minions_running = False # if we weren't assigned any jid that means the master thinks # we have nothing to send if 'jid' not in jinfo: jinfo_iter = [] else: jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid'])) timeout_at = time.time() + gather_job_timeout # if you are a syndic, wait a little longer if self.opts['order_masters']: timeout_at += self.opts.get('syndic_wait', 1) # check for minions that are running the job still for raw in jinfo_iter: # if there are no more events, lets stop waiting for the jinfo if raw is None: break try: if raw['data']['retcode'] > 0: log.error('saltutil returning errors on minion %s', raw['data']['id']) minions.remove(raw['data']['id']) break except KeyError as exc: # This is a safe pass. We're just using the try/except to # avoid having to deep-check for keys. missing_key = exc.__str__().strip('\'"') if missing_key == 'retcode': log.debug('retcode missing from client return') else: log.debug( 'Passing on saltutil error. 
Key \'%s\' missing ' 'from client return. This may be an error in ' 'the client.', missing_key ) # Keep track of the jid events to unsubscribe from later open_jids.add(jinfo['jid']) # TODO: move to a library?? if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue if 'syndic' in raw.get('data', {}): minions.update(raw['syndic']) continue if 'return' not in raw.get('data', {}): continue # if the job isn't running there anymore... don't count if raw['data']['return'] == {}: continue # if the minion throws an exception containing the word "return" # the master will try to handle the string as a dict in the next # step. Check if we have a string, log the issue and continue. if isinstance(raw['data']['return'], six.string_types): log.error("unexpected return from minion: %s", raw) continue if 'return' in raw['data']['return'] and \ raw['data']['return']['return'] == {}: continue # if we didn't originally target the minion, lets add it to the list if raw['data']['id'] not in minions: minions.add(raw['data']['id']) # update this minion's timeout, as long as the job is still running minion_timeouts[raw['data']['id']] = time.time() + timeout # a minion returned, so we know its running somewhere minions_running = True # if we have hit gather_job_timeout (after firing the job) AND # if we have hit all minion timeouts, lets call it now = time.time() # if we have finished waiting, and no minions are running the job # then we need to see if each minion has timedout done = (now > timeout_at) and not minions_running if done: # if all minions have timeod out for id_ in minions - found: if now < minion_timeouts[id_]: done = False break if done: break # don't spin if block: time.sleep(0.01) else: yield # If there are any remaining open events, clean them up. 
if open_jids: for jid in open_jids: self.event.unsubscribe(jid) if expect_minions: for minion in list((minions - found)): yield {minion: {'failed': True}} # Filter out any minions marked as missing for which we received # returns (prevents false events sent due to higher-level masters not # knowing about lower-level minions). missing -= found # Report on missing minions if missing: for minion in missing: yield {minion: {'failed': True}} def get_returns( self, jid, minions, timeout=None): ''' Get the returns for the command line interface via the event system ''' minions = set(minions) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout log.debug( 'get_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Master job cache returner [{0}] failed to verify jid. 
' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: time_left = timeout_at - int(time.time()) wait = max(1, time_left) raw = self.event.get_event(wait, jid, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: found.add(raw['id']) ret[raw['id']] = raw['return'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break if int(time.time()) > timeout_at: log.info( 'jid %s minions %s did not return in time', jid, (minions - found) ) break time.sleep(0.01) return ret def get_full_returns(self, jid, minions, timeout=None): ''' This method starts off a watcher looking at the return data for a specified jid, it returns all of the information for the jid ''' # TODO: change this from ret to return... or the other way. # Its inconsistent, we should pick one ret = {} # create the iterator-- since we want to get anyone in the middle event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Returner {0} could not fetch jid data. 
' 'Exception details: {1}'.format( self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # if we have all the minion returns, lets just return if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise lets use the listener we created above to get the rest for event_ret in event_iter: # if nothing in the event_ret, skip if event_ret == {}: time.sleep(0.02) continue for minion, m_data in six.iteritems(event_ret): if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # are we done yet? if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise we hit the timeout, return what we have return ret def get_cache_returns(self, jid): ''' Execute a single pass to gather the contents of the job cache ''' ret = {} try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Could not examine master job cache. ' 'Error occurred in {0} returner. 
' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data return ret def get_cli_static_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_timeout=False, show_jid=False): ''' Get the returns for the command line interface via the event system ''' log.trace('entered - function get_cli_static_event_returns()') minions = set(minions) if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Load could not be retrieved from ' 'returner {0}. 
Exception details: {1}'.format( self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: # Process events until timeout is reached or all minions have returned time_left = timeout_at - int(time.time()) # Wait 0 == forever, use a minimum of 1s wait = max(1, time_left) jid_tag = 'salt/job/{0}'.format(jid) raw = self.event.get_event(wait, jid_tag, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue found.add(raw['id']) ret[raw['id']] = {'ret': raw['return']} ret[raw['id']]['success'] = raw.get('success', False) if 'out' in raw: ret[raw['id']]['out'] = raw['out'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break if int(time.time()) > timeout_at: if verbose or show_timeout: if self.opts.get('minion_data_cache', False) \ or tgt_type in ('glob', 'pcre', 'list'): if len(found) < len(minions): fail = sorted(list(minions.difference(found))) for minion in fail: ret[minion] = { 'out': 'no_return', 'ret': 'Minion did not return' } break time.sleep(0.01) self._clean_up_subscriptions(jid) return ret def get_cli_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, progress=False, show_timeout=False, show_jid=False, **kwargs): ''' Get the returns for the command line interface via the event system ''' log.trace('func get_cli_event_returns()') if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) # lazy load the connected minions connected_minions = None return_count = 0 for ret in self.get_iter_returns(jid, minions, timeout=timeout, tgt=tgt, tgt_type=tgt_type, # (gtmanfred) 
expect_minions is popped here incase it is passed from a client # call. If this is not popped, then it would be passed twice to # get_iter_returns. expect_minions=(kwargs.pop('expect_minions', False) or verbose or show_timeout), **kwargs ): log.debug('return event: %s', ret) return_count = return_count + 1 if progress: for id_, min_ret in six.iteritems(ret): if not min_ret.get('failed') is True: yield {'minion_count': len(minions), 'return_count': return_count} # replace the return structure for missing minions for id_, min_ret in six.iteritems(ret): if min_ret.get('failed') is True: if connected_minions is None: connected_minions = salt.utils.minions.CkMinions(self.opts).connected_ids() if self.opts['minion_data_cache'] \ and salt.cache.factory(self.opts).contains('minions/{0}'.format(id_), 'data') \ and connected_minions \ and id_ not in connected_minions: yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [Not connected]', 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: # don't report syndics as unresponsive minions if not os.path.exists(os.path.join(self.opts['syndic_dir'], id_)): yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [No response]' '\nThe minions may not have all finished running and any ' 'remaining minions will return upon completion. To look ' 'up the return data for this job later, run the following ' 'command:\n\n' 'salt-run jobs.lookup_jid {0}'.format(jid), 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: yield {id_: min_ret} self._clean_up_subscriptions(jid) def get_event_iter_returns(self, jid, minions, timeout=None): ''' Gather the return data from the event system, break hard when timeout is reached. 
''' log.trace('entered - function get_event_iter_returns()') if timeout is None: timeout = self.opts['timeout'] timeout_at = time.time() + timeout found = set() # Check to see if the jid is real, if not return the empty dict if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() # Wait for the hosts to check in while True: raw = self.event.get_event(timeout, auto_reconnect=self.auto_reconnect) if raw is None or time.time() > timeout_at: # Timeout reached break if 'minions' in raw.get('data', {}): continue try: found.add(raw['id']) ret = {raw['id']: {'ret': raw['return']}} except KeyError: # Ignore other erroneous messages continue if 'out' in raw: ret[raw['id']]['out'] = raw['out'] yield ret time.sleep(0.02) def _prep_pub(self, tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs): ''' Set up the payload_kwargs to be sent down to the master ''' if tgt_type == 'nodegroup': if tgt not in self.opts['nodegroups']: conf_file = self.opts.get( 'conf_file', 'the master config file' ) raise SaltInvocationError( 'Node group {0} unavailable in {1}'.format( tgt, conf_file ) ) tgt = salt.utils.minions.nodegroup_comp(tgt, self.opts['nodegroups']) tgt_type = 'compound' # Convert a range expression to a list of nodes and change expression # form to list if tgt_type == 'range' and HAS_RANGE: tgt = self._convert_range_to_list(tgt) tgt_type = 'list' # If an external job cache is specified add it to the ret list if self.opts.get('ext_job_cache'): if ret: ret += ',{0}'.format(self.opts['ext_job_cache']) else: ret = self.opts['ext_job_cache'] # format the payload - make a function that does this in the payload # module # Generate the standard keyword args to feed to format_payload payload_kwargs = {'cmd': 'publish', 'tgt': tgt, 'fun': fun, 'arg': arg, 'key': self.key, 'tgt_type': tgt_type, 'ret': ret, 'jid': jid} # if kwargs are passed, pack them. 
if kwargs: payload_kwargs['kwargs'] = kwargs # If we have a salt user, add it to the payload if self.opts['syndic_master'] and 'user' in kwargs: payload_kwargs['user'] = kwargs['user'] elif self.salt_user: payload_kwargs['user'] = self.salt_user # If we're a syndication master, pass the timeout if self.opts['order_masters']: payload_kwargs['to'] = timeout return payload_kwargs def pub(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, listen=False, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.ReqChannel.factory(self.opts, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. 
# If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError as err: log.error(err) raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: return payload self.key = key payload_kwargs['key'] = self.key payload = channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: return payload # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() return {'jid': payload['load']['jid'], 'minions': payload['load']['minions']} @tornado.gen.coroutine def pub_async(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, io_loop=None, listen=True, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. 
fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.AsyncReqChannel.factory(self.opts, io_loop=io_loop, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. # If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = yield channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError: raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! 
Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: raise tornado.gen.Return(payload) self.key = key payload_kwargs['key'] = self.key payload = yield channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: raise tornado.gen.Return(payload) # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() raise tornado.gen.Return({'jid': payload['load']['jid'], 'minions': payload['load']['minions']}) def __del__(self): # This IS really necessary! # When running tests, if self.events is not destroyed, we leak 2 # threads per test case which uses self.client if hasattr(self, 'event'): # The call below will take care of calling 'self.event.destroy()' del self.event def _clean_up_subscriptions(self, job_id): if self.opts.get('order_masters'): self.event.unsubscribe('syndic/.*/{0}'.format(job_id), 'regex') self.event.unsubscribe('salt/job/{0}'.format(job_id))
saltstack/salt
salt/client/__init__.py
LocalClient.get_returns_no_block
python
def get_returns_no_block(
        self,
        tag,
        match_type=None):
    '''
    Endlessly poll the event bus for events matching ``tag`` and yield
    whatever comes back, with no timeout handling of any kind.

    Each iteration performs one short (0.01 second), non-blocking poll of
    the master event bus and yields the result: the full raw event
    dictionary when an event matched, or ``None`` when nothing did.
    Callers are responsible for deciding when to stop consuming.
    '''
    while True:
        # full=True returns the complete event envelope (tag + data);
        # no_block=True makes each poll return immediately so the caller
        # can interleave its own timeout/bookkeeping logic between yields.
        yield self.event.get_event(
            wait=0.01,
            tag=tag,
            match_type=match_type,
            full=True,
            no_block=True,
            auto_reconnect=self.auto_reconnect)
Raw function to just return events of jid, excluding timeout logic. Yields either the raw event data or None. (NOTE(review): the `tags_regex` parameter described here — a list of additional regular expressions used to search the event bus for non-return data, such as minion lists returned from syndics — is not accepted by this function's current signature; this sentence appears to be stale and should be confirmed against the code.)
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L1046-L1063
null
class LocalClient(object): ''' The interface used by the :command:`salt` CLI tool on the Salt Master ``LocalClient`` is used to send a command to Salt minions to execute :ref:`execution modules <all-salt.modules>` and return the results to the Salt Master. Importing and using ``LocalClient`` must be done on the same machine as the Salt Master and it must be done using the same user that the Salt Master is running as. (Unless :conf_master:`external_auth` is configured and authentication credentials are included in the execution). .. note:: The LocalClient uses a Tornado IOLoop, this can create issues when using the LocalClient inside an existing IOLoop. If creating the LocalClient in partnership with another IOLoop either create the IOLoop before creating the LocalClient, or when creating the IOLoop use ioloop.current() which will return the ioloop created by LocalClient. .. code-block:: python import salt.client local = salt.client.LocalClient() local.cmd('*', 'test.fib', [10]) ''' def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, 'master'), mopts=None, skip_perm_errors=False, io_loop=None, keep_loop=False, auto_reconnect=False): ''' :param IOLoop io_loop: io_loop used for events. Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. 
''' if mopts: self.opts = mopts else: if os.path.isdir(c_path): log.warning( '%s expects a file path not a directory path(%s) to ' 'its \'c_path\' keyword argument', self.__class__.__name__, c_path ) self.opts = salt.config.client_config(c_path) self.serial = salt.payload.Serial(self.opts) self.salt_user = salt.utils.user.get_specific_user() self.skip_perm_errors = skip_perm_errors self.key = self.__read_master_key() self.auto_reconnect = auto_reconnect self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False, io_loop=io_loop, keep_loop=keep_loop) self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils) self.returners = salt.loader.returners(self.opts, self.functions) def __read_master_key(self): ''' Read in the rotating master authentication key ''' key_user = self.salt_user if key_user == 'root': if self.opts.get('user', 'root') != 'root': key_user = self.opts.get('user', 'root') if key_user.startswith('sudo_'): key_user = self.opts.get('user', 'root') if salt.utils.platform.is_windows(): # The username may contain '\' if it is in Windows # 'DOMAIN\username' format. Fix this for the keyfile path. 
key_user = key_user.replace('\\', '_') keyfile = os.path.join(self.opts['cachedir'], '.{0}_key'.format(key_user)) try: # Make sure all key parent directories are accessible salt.utils.verify.check_path_traversal(self.opts['cachedir'], key_user, self.skip_perm_errors) with salt.utils.files.fopen(keyfile, 'r') as key: return salt.utils.stringutils.to_unicode(key.read()) except (OSError, IOError, SaltClientError): # Fall back to eauth return '' def _convert_range_to_list(self, tgt): ''' convert a seco.range range into a list target ''' range_ = seco.range.Range(self.opts['range_server']) try: return range_.expand(tgt) except seco.range.RangeException as err: print('Range server exception: {0}'.format(err)) return [] def _get_timeout(self, timeout): ''' Return the timeout to use ''' if timeout is None: return self.opts['timeout'] if isinstance(timeout, int): return timeout if isinstance(timeout, six.string_types): try: return int(timeout) except ValueError: return self.opts['timeout'] # Looks like the timeout is invalid, use config return self.opts['timeout'] def gather_job_info(self, jid, tgt, tgt_type, listen=True, **kwargs): ''' Return the information about a given job ''' log.debug('Checking whether jid %s is still running', jid) timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) pub_data = self.run_job(tgt, 'saltutil.find_job', arg=[jid], tgt_type=tgt_type, timeout=timeout, listen=listen, **kwargs ) if 'jid' in pub_data: self.event.subscribe(pub_data['jid']) return pub_data def _check_pub_data(self, pub_data, listen=True): ''' Common checks on the pub_data data structure returned from running pub ''' if pub_data == '': # Failed to authenticate, this could be a bunch of things raise EauthAuthenticationError( 'Failed to authenticate! This is most likely because this ' 'user is not permitted to execute commands, but there is a ' 'small possibility that a disk error occurred (check ' 'disk/inode usage).' 
) # Failed to connect to the master and send the pub if 'error' in pub_data: print(pub_data['error']) log.debug('_check_pub_data() error: %s', pub_data['error']) return {} elif 'jid' not in pub_data: return {} if pub_data['jid'] == '0': print('Failed to connect to the Master, ' 'is the Salt Master running?') return {} # If we order masters (via a syndic), don't short circuit if no minions # are found if not self.opts.get('order_masters'): # Check for no minions if not pub_data['minions']: print('No minions matched the target. ' 'No command was sent, no jid was assigned.') return {} # don't install event subscription listeners when the request is asynchronous # and doesn't care. this is important as it will create event leaks otherwise if not listen: return pub_data if self.opts.get('order_masters'): self.event.subscribe('syndic/.*/{0}'.format(pub_data['jid']), 'regex') self.event.subscribe('salt/job/{0}'.format(pub_data['jid'])) return pub_data def run_job( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=False, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = self.pub( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) return self._check_pub_data(pub_data, listen=listen) def gather_minions(self, tgt, expr_form): _res = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form) return _res['minions'] @tornado.gen.coroutine def run_job_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=True, io_loop=None, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job_async('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = yield self.pub_async( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), io_loop=io_loop, listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) raise tornado.gen.Return(self._check_pub_data(pub_data, listen=listen)) def cmd_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', kwarg=None, **kwargs): ''' Asynchronously send a command to connected minions The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A job ID or 0 on failure. .. code-block:: python >>> local.cmd_async('*', 'test.sleep', [300]) '20131219215921857715' ''' pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, jid=jid, kwarg=kwarg, listen=False, **kwargs) try: return pub_data['jid'] except KeyError: return 0 def cmd_subset( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, sub=3, cli=False, progress=False, full_return=False, **kwargs): ''' Execute a command on a random subset of the targeted systems The function signature is the same as :py:meth:`cmd` with the following exceptions. :param sub: The number of systems to execute on :param cli: When this is set to True, a generator is returned, otherwise a dictionary of the minion returns is returned .. 
code-block:: python >>> SLC.cmd_subset('*', 'test.ping', sub=1) {'jerry': True} ''' minion_ret = self.cmd(tgt, 'sys.list_functions', tgt_type=tgt_type, **kwargs) minions = list(minion_ret) random.shuffle(minions) f_tgt = [] for minion in minions: if fun in minion_ret[minion]: f_tgt.append(minion) if len(f_tgt) >= sub: break func = self.cmd if cli: func = self.cmd_cli return func( f_tgt, fun, arg, tgt_type='list', ret=ret, kwarg=kwarg, progress=progress, full_return=full_return, **kwargs) def cmd_batch( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, batch='10%', **kwargs): ''' Iteratively execute a command on subsets of minions at a time The function signature is the same as :py:meth:`cmd` with the following exceptions. :param batch: The batch identifier of systems to execute on :returns: A generator of minion returns .. code-block:: python >>> returns = local.cmd_batch('*', 'state.highstate', batch='10%') >>> for ret in returns: ... print(ret) {'jerry': {...}} {'dave': {...}} {'stewart': {...}} ''' # Late import - not used anywhere else in this file import salt.cli.batch opts = salt.cli.batch.batch_get_opts( tgt, fun, batch, self.opts, arg=arg, tgt_type=tgt_type, ret=ret, kwarg=kwarg, **kwargs) eauth = salt.cli.batch.batch_get_eauth(kwargs) arg = salt.utils.args.parse_input(arg, kwargs=kwarg) opts = {'tgt': tgt, 'fun': fun, 'arg': arg, 'tgt_type': tgt_type, 'ret': ret, 'batch': batch, 'failhard': kwargs.get('failhard', False), 'raw': kwargs.get('raw', False)} if 'timeout' in kwargs: opts['timeout'] = kwargs['timeout'] if 'gather_job_timeout' in kwargs: opts['gather_job_timeout'] = kwargs['gather_job_timeout'] if 'batch_wait' in kwargs: opts['batch_wait'] = int(kwargs['batch_wait']) eauth = {} if 'eauth' in kwargs: eauth['eauth'] = kwargs.pop('eauth') if 'username' in kwargs: eauth['username'] = kwargs.pop('username') if 'password' in kwargs: eauth['password'] = kwargs.pop('password') if 'token' in kwargs: eauth['token'] = kwargs.pop('token') for key, 
val in six.iteritems(self.opts): if key not in opts: opts[key] = val batch = salt.cli.batch.Batch(opts, eauth=eauth, quiet=True) for ret in batch.run(): yield ret def cmd(self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', jid='', full_return=False, kwarg=None, **kwargs): ''' Synchronously execute a command on targeted minions The cmd method will execute and wait for the timeout period for all minions to reply, then it will return all minion data at once. .. code-block:: python >>> import salt.client >>> local = salt.client.LocalClient() >>> local.cmd('*', 'cmd.run', ['whoami']) {'jerry': 'root'} With extra keyword arguments for the command function to be run: .. code-block:: python local.cmd('*', 'test.arg', ['arg1', 'arg2'], kwarg={'foo': 'bar'}) Compound commands can be used for multiple executions in a single publish. Function names and function arguments are provided in separate lists but the index values must correlate and an empty list must be used if no arguments are required. .. code-block:: python >>> local.cmd('*', [ 'grains.items', 'sys.doc', 'cmd.run', ], [ [], [], ['uptime'], ]) :param tgt: Which minions to target for the execution. Default is shell glob. Modified by the ``tgt_type`` option. :type tgt: string or list :param fun: The module and function to call on the specified minions of the form ``module.function``. For example ``test.ping`` or ``grains.items``. Compound commands Multiple functions may be called in a single publish by passing a list of commands. This can dramatically lower overhead and speed up the application communicating with Salt. This requires that the ``arg`` param is a list of lists. The ``fun`` list and the ``arg`` list must correlate by index meaning a function that does not take arguments must still have a corresponding empty list at the expected index. :type fun: string or list of strings :param arg: A list of arguments to pass to the remote function. 
If the function takes no arguments ``arg`` may be omitted except when executing a compound command. :type arg: list or list-of-lists :param timeout: Seconds to wait after the last minion returns but before all minions return. :param tgt_type: The type of ``tgt``. Allowed values: * ``glob`` - Bash glob completion - Default * ``pcre`` - Perl style regular expression * ``list`` - Python list of hosts * ``grain`` - Match based on a grain comparison * ``grain_pcre`` - Grain comparison with a regex * ``pillar`` - Pillar data comparison * ``pillar_pcre`` - Pillar data comparison with a regex * ``nodegroup`` - Match on nodegroup * ``range`` - Use a Range server for matching * ``compound`` - Pass a compound match string * ``ipcidr`` - Match based on Subnet (CIDR notation) or IPv4 address. .. versionchanged:: 2017.7.0 Renamed from ``expr_form`` to ``tgt_type`` :param ret: The returner to use. The value passed can be single returner, or a comma delimited list of returners to call in order on the minions :param kwarg: A dictionary with keyword arguments for the function. :param full_return: Output the job return only (default) or the full return including exit code and other job metadata. :param kwargs: Optional keyword arguments. Authentication credentials may be passed when using :conf_master:`external_auth`. For example: ``local.cmd('*', 'test.ping', username='saltdev', password='saltdev', eauth='pam')``. Or: ``local.cmd('*', 'test.ping', token='5871821ea51754fdcea8153c1c745433')`` :returns: A dictionary with the result of the execution, keyed by minion ID. A compound command will return a sub-dictionary keyed by function name. 
''' was_listening = self.event.cpub try: pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, timeout, jid, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data ret = {} for fn_ret in self.get_cli_event_returns( pub_data['jid'], pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, **kwargs): if fn_ret: for mid, data in six.iteritems(fn_ret): ret[mid] = (data if full_return else data.get('ret', {})) for failed in list(set(pub_data['minions']) - set(ret)): ret[failed] = False return ret finally: if not was_listening: self.event.close_pub() def cmd_cli( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, progress=False, **kwargs): ''' Used by the :command:`salt` CLI. This method returns minion returns as they come back and attempts to block until all minions return. The function signature is the same as :py:meth:`cmd` with the following exceptions. :param verbose: Print extra information about the running command :returns: A generator ''' was_listening = self.event.cpub if fun.startswith('state.'): ref = {'compound': '-C', 'glob': '', 'grain': '-G', 'grain_pcre': '-P', 'ipcidr': '-S', 'list': '-L', 'nodegroup': '-N', 'pcre': '-E', 'pillar': '-I', 'pillar_pcre': '-J'} if HAS_RANGE: ref['range'] = '-R' if ref[tgt_type].startswith('-'): self.target_data = "{0} '{1}'".format( ref[tgt_type], ','.join(tgt) if isinstance(tgt, list) else tgt) else: self.target_data = ','.join(tgt) if isinstance(tgt, list) else tgt else: self.target_data = '' try: self.pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not self.pub_data: yield self.pub_data else: try: for fn_ret in self.get_cli_event_returns( self.pub_data['jid'], self.pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, verbose, progress, **kwargs): if not fn_ret: continue yield fn_ret except KeyboardInterrupt: exit_msg = ( '\nExiting gracefully on Ctrl-c' '\n' 'This job\'s jid is: {0}\n' 'The 
minions may not have all finished running and any ' 'remaining minions will return upon completion.\n\n' 'To look up the return data for this job later, run the ' 'following command:\n' 'salt-run jobs.lookup_jid {0}'.format(self.pub_data['jid'])) if self.target_data: exit_msg += ( '\n\n' 'To set up the state run to safely exit, run the following command:\n' 'salt {0} state.soft_kill {1}'.format(self.target_data, self.pub_data['jid'])) raise SystemExit(exit_msg) finally: if not was_listening: self.event.close_pub() def cmd_iter( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, **kwargs): ''' Yields the individual minion returns as they come in The function signature is the same as :py:meth:`cmd` with the following exceptions. Normally :py:meth:`cmd_iter` does not yield results for minions that are not connected. If you want it to return results for disconnected minions set `expect_minions=True` in `kwargs`. :return: A generator yielding the individual minion returns .. code-block:: python >>> ret = local.cmd_iter('*', 'test.ping') >>> for i in ret: ... print(i) {'jerry': {'ret': True}} {'dave': {'ret': True}} {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: if kwargs.get('yield_pub_data'): yield pub_data for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=self._get_timeout(timeout), tgt=tgt, tgt_type=tgt_type, **kwargs): if not fn_ret: continue yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_iter_no_block( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, show_jid=False, verbose=False, **kwargs): ''' Yields the individual minion returns as they come in, or None when no returns are available. 
The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A generator yielding the individual minion returns, or None when no returns are available. This allows for actions to be injected in between minion returns. .. code-block:: python >>> ret = local.cmd_iter_no_block('*', 'test.ping') >>> for i in ret: ... print(i) None {'jerry': {'ret': True}} {'dave': {'ret': True}} None {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=timeout, tgt=tgt, tgt_type=tgt_type, block=False, **kwargs): if fn_ret and any([show_jid, verbose]): for minion in fn_ret: fn_ret[minion]['jid'] = pub_data['jid'] yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_full_return( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, **kwargs): ''' Execute a salt command and return ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data return (self.get_cli_static_event_returns(pub_data['jid'], pub_data['minions'], timeout, tgt, tgt_type, verbose)) finally: if not was_listening: self.event.close_pub() def get_cli_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_jid=False, **kwargs): ''' Starts a watcher looking at the return data for a specified JID :returns: all of the information for the JID ''' if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] fret = {} # make sure the minions is a set (since we do set operations on it) 
minions = set(minions) found = set() # start this before the cache lookup-- in case new stuff comes in event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) # get the info from the cache ret = self.get_cache_returns(jid) if ret != {}: found.update(set(ret)) yield ret # if you have all the returns, stop if len(found.intersection(minions)) >= len(minions): raise StopIteration() # otherwise, get them from the event system for event in event_iter: if event != {}: found.update(set(event)) yield event if len(found.intersection(minions)) >= len(minions): self._clean_up_subscriptions(jid) raise StopIteration() # TODO: tests!! def get_iter_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', expect_minions=False, block=True, **kwargs): ''' Watch the event system and return job data as it comes in :returns: all of the information for the JID ''' if not isinstance(minions, set): if isinstance(minions, six.string_types): minions = set([minions]) elif isinstance(minions, (list, tuple)): minions = set(list(minions)) if timeout is None: timeout = self.opts['timeout'] gather_job_timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) start = int(time.time()) # timeouts per minion, id_ -> timeout time minion_timeouts = {} found = set() missing = set() # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() except Exception as exc: log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG) # Wait for the hosts to check in last_time = False # iterator for this job's return if self.opts['order_masters']: # If we are a MoM, we need to gather expected minions from downstreams masters. 
ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex') else: ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid)) # iterator for the info of this job jinfo_iter = [] # open event jids that need to be un-subscribed from later open_jids = set() timeout_at = time.time() + timeout gather_syndic_wait = time.time() + self.opts['syndic_wait'] # are there still minions running the job out there # start as True so that we ping at least once minions_running = True log.debug( 'get_iter_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) while True: # Process events until timeout is reached or all minions have returned for raw in ret_iter: # if we got None, then there were no events if raw is None: break if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) if 'missing' in raw.get('data', {}): missing.update(raw['data']['missing']) continue if 'return' not in raw['data']: continue if kwargs.get('raw', False): found.add(raw['data']['id']) yield raw else: found.add(raw['data']['id']) ret = {raw['data']['id']: {'ret': raw['data']['return']}} if 'out' in raw['data']: ret[raw['data']['id']]['out'] = raw['data']['out'] if 'retcode' in raw['data']: ret[raw['data']['id']]['retcode'] = raw['data']['retcode'] if 'jid' in raw['data']: ret[raw['data']['id']]['jid'] = raw['data']['jid'] if kwargs.get('_cmd_meta', False): ret[raw['data']['id']].update(raw['data']) log.debug('jid %s return from %s', jid, raw['data']['id']) yield ret # if we have all of the returns (and we aren't a syndic), no need for anything fancy if len(found.intersection(minions)) >= len(minions) and not self.opts['order_masters']: # All minions have returned, break out of the loop log.debug('jid %s found all minions %s', jid, found) break elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']: if len(found) >= len(minions) and len(minions) > 0 and time.time() > 
gather_syndic_wait: # There were some minions to find and we found them # However, this does not imply that *all* masters have yet responded with expected minion lists. # Therefore, continue to wait up to the syndic_wait period (calculated in gather_syndic_wait) to see # if additional lower-level masters deliver their lists of expected # minions. break # If we get here we may not have gathered the minion list yet. Keep waiting # for all lower-level masters to respond with their minion lists # let start the timeouts for all remaining minions for id_ in minions - found: # if we have a new minion in the list, make sure it has a timeout if id_ not in minion_timeouts: minion_timeouts[id_] = time.time() + timeout # if the jinfo has timed out and some minions are still running the job # re-do the ping if time.time() > timeout_at and minions_running: # since this is a new ping, no one has responded yet jinfo = self.gather_job_info(jid, list(minions - found), 'list', **kwargs) minions_running = False # if we weren't assigned any jid that means the master thinks # we have nothing to send if 'jid' not in jinfo: jinfo_iter = [] else: jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid'])) timeout_at = time.time() + gather_job_timeout # if you are a syndic, wait a little longer if self.opts['order_masters']: timeout_at += self.opts.get('syndic_wait', 1) # check for minions that are running the job still for raw in jinfo_iter: # if there are no more events, lets stop waiting for the jinfo if raw is None: break try: if raw['data']['retcode'] > 0: log.error('saltutil returning errors on minion %s', raw['data']['id']) minions.remove(raw['data']['id']) break except KeyError as exc: # This is a safe pass. We're just using the try/except to # avoid having to deep-check for keys. missing_key = exc.__str__().strip('\'"') if missing_key == 'retcode': log.debug('retcode missing from client return') else: log.debug( 'Passing on saltutil error. 
Key \'%s\' missing ' 'from client return. This may be an error in ' 'the client.', missing_key ) # Keep track of the jid events to unsubscribe from later open_jids.add(jinfo['jid']) # TODO: move to a library?? if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue if 'syndic' in raw.get('data', {}): minions.update(raw['syndic']) continue if 'return' not in raw.get('data', {}): continue # if the job isn't running there anymore... don't count if raw['data']['return'] == {}: continue # if the minion throws an exception containing the word "return" # the master will try to handle the string as a dict in the next # step. Check if we have a string, log the issue and continue. if isinstance(raw['data']['return'], six.string_types): log.error("unexpected return from minion: %s", raw) continue if 'return' in raw['data']['return'] and \ raw['data']['return']['return'] == {}: continue # if we didn't originally target the minion, lets add it to the list if raw['data']['id'] not in minions: minions.add(raw['data']['id']) # update this minion's timeout, as long as the job is still running minion_timeouts[raw['data']['id']] = time.time() + timeout # a minion returned, so we know its running somewhere minions_running = True # if we have hit gather_job_timeout (after firing the job) AND # if we have hit all minion timeouts, lets call it now = time.time() # if we have finished waiting, and no minions are running the job # then we need to see if each minion has timedout done = (now > timeout_at) and not minions_running if done: # if all minions have timeod out for id_ in minions - found: if now < minion_timeouts[id_]: done = False break if done: break # don't spin if block: time.sleep(0.01) else: yield # If there are any remaining open events, clean them up. 
if open_jids: for jid in open_jids: self.event.unsubscribe(jid) if expect_minions: for minion in list((minions - found)): yield {minion: {'failed': True}} # Filter out any minions marked as missing for which we received # returns (prevents false events sent due to higher-level masters not # knowing about lower-level minions). missing -= found # Report on missing minions if missing: for minion in missing: yield {minion: {'failed': True}} def get_returns( self, jid, minions, timeout=None): ''' Get the returns for the command line interface via the event system ''' minions = set(minions) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout log.debug( 'get_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Master job cache returner [{0}] failed to verify jid. 
' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: time_left = timeout_at - int(time.time()) wait = max(1, time_left) raw = self.event.get_event(wait, jid, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: found.add(raw['id']) ret[raw['id']] = raw['return'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break if int(time.time()) > timeout_at: log.info( 'jid %s minions %s did not return in time', jid, (minions - found) ) break time.sleep(0.01) return ret def get_full_returns(self, jid, minions, timeout=None): ''' This method starts off a watcher looking at the return data for a specified jid, it returns all of the information for the jid ''' # TODO: change this from ret to return... or the other way. # Its inconsistent, we should pick one ret = {} # create the iterator-- since we want to get anyone in the middle event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Returner {0} could not fetch jid data. 
' 'Exception details: {1}'.format( self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # if we have all the minion returns, lets just return if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise lets use the listener we created above to get the rest for event_ret in event_iter: # if nothing in the event_ret, skip if event_ret == {}: time.sleep(0.02) continue for minion, m_data in six.iteritems(event_ret): if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # are we done yet? if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise we hit the timeout, return what we have return ret def get_cache_returns(self, jid): ''' Execute a single pass to gather the contents of the job cache ''' ret = {} try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Could not examine master job cache. ' 'Error occurred in {0} returner. 
' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data return ret def get_cli_static_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_timeout=False, show_jid=False): ''' Get the returns for the command line interface via the event system ''' log.trace('entered - function get_cli_static_event_returns()') minions = set(minions) if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Load could not be retrieved from ' 'returner {0}. 
Exception details: {1}'.format( self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: # Process events until timeout is reached or all minions have returned time_left = timeout_at - int(time.time()) # Wait 0 == forever, use a minimum of 1s wait = max(1, time_left) jid_tag = 'salt/job/{0}'.format(jid) raw = self.event.get_event(wait, jid_tag, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue found.add(raw['id']) ret[raw['id']] = {'ret': raw['return']} ret[raw['id']]['success'] = raw.get('success', False) if 'out' in raw: ret[raw['id']]['out'] = raw['out'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break if int(time.time()) > timeout_at: if verbose or show_timeout: if self.opts.get('minion_data_cache', False) \ or tgt_type in ('glob', 'pcre', 'list'): if len(found) < len(minions): fail = sorted(list(minions.difference(found))) for minion in fail: ret[minion] = { 'out': 'no_return', 'ret': 'Minion did not return' } break time.sleep(0.01) self._clean_up_subscriptions(jid) return ret def get_cli_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, progress=False, show_timeout=False, show_jid=False, **kwargs): ''' Get the returns for the command line interface via the event system ''' log.trace('func get_cli_event_returns()') if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) # lazy load the connected minions connected_minions = None return_count = 0 for ret in self.get_iter_returns(jid, minions, timeout=timeout, tgt=tgt, tgt_type=tgt_type, # (gtmanfred) 
expect_minions is popped here incase it is passed from a client # call. If this is not popped, then it would be passed twice to # get_iter_returns. expect_minions=(kwargs.pop('expect_minions', False) or verbose or show_timeout), **kwargs ): log.debug('return event: %s', ret) return_count = return_count + 1 if progress: for id_, min_ret in six.iteritems(ret): if not min_ret.get('failed') is True: yield {'minion_count': len(minions), 'return_count': return_count} # replace the return structure for missing minions for id_, min_ret in six.iteritems(ret): if min_ret.get('failed') is True: if connected_minions is None: connected_minions = salt.utils.minions.CkMinions(self.opts).connected_ids() if self.opts['minion_data_cache'] \ and salt.cache.factory(self.opts).contains('minions/{0}'.format(id_), 'data') \ and connected_minions \ and id_ not in connected_minions: yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [Not connected]', 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: # don't report syndics as unresponsive minions if not os.path.exists(os.path.join(self.opts['syndic_dir'], id_)): yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [No response]' '\nThe minions may not have all finished running and any ' 'remaining minions will return upon completion. To look ' 'up the return data for this job later, run the following ' 'command:\n\n' 'salt-run jobs.lookup_jid {0}'.format(jid), 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: yield {id_: min_ret} self._clean_up_subscriptions(jid) def get_event_iter_returns(self, jid, minions, timeout=None): ''' Gather the return data from the event system, break hard when timeout is reached. 
''' log.trace('entered - function get_event_iter_returns()') if timeout is None: timeout = self.opts['timeout'] timeout_at = time.time() + timeout found = set() # Check to see if the jid is real, if not return the empty dict if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() # Wait for the hosts to check in while True: raw = self.event.get_event(timeout, auto_reconnect=self.auto_reconnect) if raw is None or time.time() > timeout_at: # Timeout reached break if 'minions' in raw.get('data', {}): continue try: found.add(raw['id']) ret = {raw['id']: {'ret': raw['return']}} except KeyError: # Ignore other erroneous messages continue if 'out' in raw: ret[raw['id']]['out'] = raw['out'] yield ret time.sleep(0.02) def _prep_pub(self, tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs): ''' Set up the payload_kwargs to be sent down to the master ''' if tgt_type == 'nodegroup': if tgt not in self.opts['nodegroups']: conf_file = self.opts.get( 'conf_file', 'the master config file' ) raise SaltInvocationError( 'Node group {0} unavailable in {1}'.format( tgt, conf_file ) ) tgt = salt.utils.minions.nodegroup_comp(tgt, self.opts['nodegroups']) tgt_type = 'compound' # Convert a range expression to a list of nodes and change expression # form to list if tgt_type == 'range' and HAS_RANGE: tgt = self._convert_range_to_list(tgt) tgt_type = 'list' # If an external job cache is specified add it to the ret list if self.opts.get('ext_job_cache'): if ret: ret += ',{0}'.format(self.opts['ext_job_cache']) else: ret = self.opts['ext_job_cache'] # format the payload - make a function that does this in the payload # module # Generate the standard keyword args to feed to format_payload payload_kwargs = {'cmd': 'publish', 'tgt': tgt, 'fun': fun, 'arg': arg, 'key': self.key, 'tgt_type': tgt_type, 'ret': ret, 'jid': jid} # if kwargs are passed, pack them. 
if kwargs: payload_kwargs['kwargs'] = kwargs # If we have a salt user, add it to the payload if self.opts['syndic_master'] and 'user' in kwargs: payload_kwargs['user'] = kwargs['user'] elif self.salt_user: payload_kwargs['user'] = self.salt_user # If we're a syndication master, pass the timeout if self.opts['order_masters']: payload_kwargs['to'] = timeout return payload_kwargs def pub(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, listen=False, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.ReqChannel.factory(self.opts, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. 
# If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError as err: log.error(err) raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: return payload self.key = key payload_kwargs['key'] = self.key payload = channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: return payload # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() return {'jid': payload['load']['jid'], 'minions': payload['load']['minions']} @tornado.gen.coroutine def pub_async(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, io_loop=None, listen=True, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. 
fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.AsyncReqChannel.factory(self.opts, io_loop=io_loop, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. # If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = yield channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError: raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! 
Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: raise tornado.gen.Return(payload) self.key = key payload_kwargs['key'] = self.key payload = yield channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: raise tornado.gen.Return(payload) # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() raise tornado.gen.Return({'jid': payload['load']['jid'], 'minions': payload['load']['minions']}) def __del__(self): # This IS really necessary! # When running tests, if self.events is not destroyed, we leak 2 # threads per test case which uses self.client if hasattr(self, 'event'): # The call below will take care of calling 'self.event.destroy()' del self.event def _clean_up_subscriptions(self, job_id): if self.opts.get('order_masters'): self.event.unsubscribe('syndic/.*/{0}'.format(job_id), 'regex') self.event.unsubscribe('salt/job/{0}'.format(job_id))
saltstack/salt
salt/client/__init__.py
LocalClient.get_iter_returns
python
def get_iter_returns(
        self,
        jid,
        minions,
        timeout=None,
        tgt='*',
        tgt_type='glob',
        expect_minions=False,
        block=True,
        **kwargs):
    '''
    Watch the event system and yield job return data as it comes in.

    :param jid: The job id to watch for returns.
    :param minions: The minion id(s) expected to return. A string, list,
        tuple or set; normalized to a set internally.
    :param timeout: Seconds to wait for returns. Defaults to
        ``self.opts['timeout']``.
    :param tgt: The original target spec (unused here, kept for interface
        compatibility with the other ``get_*_returns`` methods).
    :param tgt_type: The target type (unused here, same reason as ``tgt``).
    :param expect_minions: If True, yield ``{minion: {'failed': True}}``
        for every expected minion that never returned.
    :param block: If True, sleep briefly between polls; if False, yield
        ``None`` instead so the caller can interleave other work.

    :returns: A generator yielding per-minion return dictionaries
        (``{id: {'ret': ..., 'out': ..., 'retcode': ..., 'jid': ...}}``),
        or raw event dicts when ``kwargs['raw']`` is truthy.
    '''
    # Normalize the expected-minion collection to a set, since we do set
    # arithmetic (intersection/difference) on it throughout.
    if not isinstance(minions, set):
        if isinstance(minions, six.string_types):
            minions = set([minions])
        elif isinstance(minions, (list, tuple)):
            minions = set(list(minions))

    if timeout is None:
        timeout = self.opts['timeout']
    gather_job_timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout']))

    # timeouts per minion, id_ -> timeout time
    minion_timeouts = {}

    found = set()
    missing = set()
    # Check to see if the jid is real, if not return the empty dict
    try:
        if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}:
            log.warning('jid does not exist')
            yield {}
            # Stop the iteration, since the jid is invalid. NOTE: a bare
            # ``return`` is used (not ``raise StopIteration()``): an
            # explicit StopIteration here would be swallowed by the
            # ``except Exception`` below (StopIteration subclasses
            # Exception) and, under PEP 479 (Python 3.7+), would be turned
            # into a RuntimeError if it escaped the generator.
            return
    except Exception as exc:
        # The returner backend itself may be broken/unreachable; log and
        # fall through to waiting on the event bus anyway.
        log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG)
    # Wait for the hosts to check in
    # iterator for this job's return
    if self.opts['order_masters']:
        # If we are a MoM, we need to gather expected minions from downstream masters,
        # which publish their lists on syndic/* tags as well as salt/job/*.
        ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex')
    else:
        ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid))
    # iterator for the info of this job (saltutil.find_job ping results)
    jinfo_iter = []
    # open event jids that need to be un-subscribed from later
    open_jids = set()
    timeout_at = time.time() + timeout
    gather_syndic_wait = time.time() + self.opts['syndic_wait']
    # are there still minions running the job out there?
    # start as True so that we ping at least once
    minions_running = True
    log.debug(
        'get_iter_returns for jid %s sent to %s will timeout at %s',
        jid, minions, datetime.fromtimestamp(timeout_at).time()
    )
    while True:
        # Process events until timeout is reached or all minions have returned
        for raw in ret_iter:
            # if we got None, then there were no events
            if raw is None:
                break
            # A syndic/master announcing which minions it expects to answer
            if 'minions' in raw.get('data', {}):
                minions.update(raw['data']['minions'])
                if 'missing' in raw.get('data', {}):
                    missing.update(raw['data']['missing'])
                continue
            if 'return' not in raw['data']:
                continue
            if kwargs.get('raw', False):
                found.add(raw['data']['id'])
                yield raw
            else:
                found.add(raw['data']['id'])
                ret = {raw['data']['id']: {'ret': raw['data']['return']}}
                if 'out' in raw['data']:
                    ret[raw['data']['id']]['out'] = raw['data']['out']
                if 'retcode' in raw['data']:
                    ret[raw['data']['id']]['retcode'] = raw['data']['retcode']
                if 'jid' in raw['data']:
                    ret[raw['data']['id']]['jid'] = raw['data']['jid']
                if kwargs.get('_cmd_meta', False):
                    ret[raw['data']['id']].update(raw['data'])
                log.debug('jid %s return from %s', jid, raw['data']['id'])
                yield ret

        # if we have all of the returns (and we aren't a syndic), no need for anything fancy
        if len(found.intersection(minions)) >= len(minions) and not self.opts['order_masters']:
            # All minions have returned, break out of the loop
            log.debug('jid %s found all minions %s', jid, found)
            break
        elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']:
            if len(found) >= len(minions) and len(minions) > 0 and time.time() > gather_syndic_wait:
                # There were some minions to find and we found them.
                # However, this does not imply that *all* masters have yet responded with expected minion lists.
                # Therefore, continue to wait up to the syndic_wait period (calculated in gather_syndic_wait) to see
                # if additional lower-level masters deliver their lists of expected
                # minions.
                break
        # If we get here we may not have gathered the minion list yet. Keep waiting
        # for all lower-level masters to respond with their minion lists

        # let's start the timeouts for all remaining minions
        for id_ in minions - found:
            # if we have a new minion in the list, make sure it has a timeout
            if id_ not in minion_timeouts:
                minion_timeouts[id_] = time.time() + timeout

        # if the jinfo has timed out and some minions are still running the job
        # re-do the ping
        if time.time() > timeout_at and minions_running:
            # since this is a new ping, no one has responded yet
            jinfo = self.gather_job_info(jid, list(minions - found), 'list', **kwargs)
            minions_running = False
            # if we weren't assigned any jid that means the master thinks
            # we have nothing to send
            if 'jid' not in jinfo:
                jinfo_iter = []
            else:
                jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid']))
            timeout_at = time.time() + gather_job_timeout
            # if you are a syndic, wait a little longer
            if self.opts['order_masters']:
                timeout_at += self.opts.get('syndic_wait', 1)

        # check for minions that are running the job still
        for raw in jinfo_iter:
            # if there are no more events, let's stop waiting for the jinfo
            if raw is None:
                break
            try:
                if raw['data']['retcode'] > 0:
                    log.error('saltutil returning errors on minion %s', raw['data']['id'])
                    minions.remove(raw['data']['id'])
                    break
            except KeyError as exc:
                # This is a safe pass. We're just using the try/except to
                # avoid having to deep-check for keys.
                missing_key = exc.__str__().strip('\'"')
                if missing_key == 'retcode':
                    log.debug('retcode missing from client return')
                else:
                    log.debug(
                        'Passing on saltutil error. Key \'%s\' missing '
                        'from client return. This may be an error in '
                        'the client.', missing_key
                    )
            # Keep track of the jid events to unsubscribe from later
            open_jids.add(jinfo['jid'])

            # TODO: move to a library??
            if 'minions' in raw.get('data', {}):
                minions.update(raw['data']['minions'])
                continue
            if 'syndic' in raw.get('data', {}):
                minions.update(raw['syndic'])
                continue
            if 'return' not in raw.get('data', {}):
                continue

            # if the job isn't running there anymore... don't count
            if raw['data']['return'] == {}:
                continue

            # if the minion throws an exception containing the word "return"
            # the master will try to handle the string as a dict in the next
            # step. Check if we have a string, log the issue and continue.
            if isinstance(raw['data']['return'], six.string_types):
                log.error("unexpected return from minion: %s", raw)
                continue

            if 'return' in raw['data']['return'] and \
                    raw['data']['return']['return'] == {}:
                continue

            # if we didn't originally target the minion, let's add it to the list
            if raw['data']['id'] not in minions:
                minions.add(raw['data']['id'])
            # update this minion's timeout, as long as the job is still running
            minion_timeouts[raw['data']['id']] = time.time() + timeout
            # a minion returned, so we know it's running somewhere
            minions_running = True

        # if we have hit gather_job_timeout (after firing the job) AND
        # if we have hit all minion timeouts, let's call it
        now = time.time()
        # if we have finished waiting, and no minions are running the job
        # then we need to see if each minion has timed out
        done = (now > timeout_at) and not minions_running
        if done:
            # if all minions have timed out
            for id_ in minions - found:
                if now < minion_timeouts[id_]:
                    done = False
                    break
        if done:
            break

        # don't spin
        if block:
            time.sleep(0.01)
        else:
            yield

    # If there are any remaining open events, clean them up.
    # (Use a distinct loop variable to avoid shadowing the ``jid`` parameter.)
    if open_jids:
        for open_jid in open_jids:
            self.event.unsubscribe(open_jid)

    if expect_minions:
        for minion in list((minions - found)):
            yield {minion: {'failed': True}}

    # Filter out any minions marked as missing for which we received
    # returns (prevents false events sent due to higher-level masters not
    # knowing about lower-level minions).
    missing -= found

    # Report on missing minions
    if missing:
        for minion in missing:
            yield {minion: {'failed': True}}
Watch the event system and return job data as it comes in :returns: all of the information for the JID
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L1065-L1292
[ "def gather_job_info(self, jid, tgt, tgt_type, listen=True, **kwargs):\n '''\n Return the information about a given job\n '''\n log.debug('Checking whether jid %s is still running', jid)\n timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout']))\n\n pub_data = self.run_job(tgt,\n 'saltutil.find_job',\n arg=[jid],\n tgt_type=tgt_type,\n timeout=timeout,\n listen=listen,\n **kwargs\n )\n\n if 'jid' in pub_data:\n self.event.subscribe(pub_data['jid'])\n\n return pub_data\n", "def get_returns_no_block(\n self,\n tag,\n match_type=None):\n '''\n Raw function to just return events of jid excluding timeout logic\n\n Yield either the raw event data or None\n\n Pass a list of additional regular expressions as `tags_regex` to search\n the event bus for non-return data, such as minion lists returned from\n syndics.\n '''\n\n while True:\n raw = self.event.get_event(wait=0.01, tag=tag, match_type=match_type, full=True,\n no_block=True, auto_reconnect=self.auto_reconnect)\n yield raw\n" ]
class LocalClient(object): ''' The interface used by the :command:`salt` CLI tool on the Salt Master ``LocalClient`` is used to send a command to Salt minions to execute :ref:`execution modules <all-salt.modules>` and return the results to the Salt Master. Importing and using ``LocalClient`` must be done on the same machine as the Salt Master and it must be done using the same user that the Salt Master is running as. (Unless :conf_master:`external_auth` is configured and authentication credentials are included in the execution). .. note:: The LocalClient uses a Tornado IOLoop, this can create issues when using the LocalClient inside an existing IOLoop. If creating the LocalClient in partnership with another IOLoop either create the IOLoop before creating the LocalClient, or when creating the IOLoop use ioloop.current() which will return the ioloop created by LocalClient. .. code-block:: python import salt.client local = salt.client.LocalClient() local.cmd('*', 'test.fib', [10]) ''' def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, 'master'), mopts=None, skip_perm_errors=False, io_loop=None, keep_loop=False, auto_reconnect=False): ''' :param IOLoop io_loop: io_loop used for events. Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. 
''' if mopts: self.opts = mopts else: if os.path.isdir(c_path): log.warning( '%s expects a file path not a directory path(%s) to ' 'its \'c_path\' keyword argument', self.__class__.__name__, c_path ) self.opts = salt.config.client_config(c_path) self.serial = salt.payload.Serial(self.opts) self.salt_user = salt.utils.user.get_specific_user() self.skip_perm_errors = skip_perm_errors self.key = self.__read_master_key() self.auto_reconnect = auto_reconnect self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False, io_loop=io_loop, keep_loop=keep_loop) self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils) self.returners = salt.loader.returners(self.opts, self.functions) def __read_master_key(self): ''' Read in the rotating master authentication key ''' key_user = self.salt_user if key_user == 'root': if self.opts.get('user', 'root') != 'root': key_user = self.opts.get('user', 'root') if key_user.startswith('sudo_'): key_user = self.opts.get('user', 'root') if salt.utils.platform.is_windows(): # The username may contain '\' if it is in Windows # 'DOMAIN\username' format. Fix this for the keyfile path. 
key_user = key_user.replace('\\', '_') keyfile = os.path.join(self.opts['cachedir'], '.{0}_key'.format(key_user)) try: # Make sure all key parent directories are accessible salt.utils.verify.check_path_traversal(self.opts['cachedir'], key_user, self.skip_perm_errors) with salt.utils.files.fopen(keyfile, 'r') as key: return salt.utils.stringutils.to_unicode(key.read()) except (OSError, IOError, SaltClientError): # Fall back to eauth return '' def _convert_range_to_list(self, tgt): ''' convert a seco.range range into a list target ''' range_ = seco.range.Range(self.opts['range_server']) try: return range_.expand(tgt) except seco.range.RangeException as err: print('Range server exception: {0}'.format(err)) return [] def _get_timeout(self, timeout): ''' Return the timeout to use ''' if timeout is None: return self.opts['timeout'] if isinstance(timeout, int): return timeout if isinstance(timeout, six.string_types): try: return int(timeout) except ValueError: return self.opts['timeout'] # Looks like the timeout is invalid, use config return self.opts['timeout'] def gather_job_info(self, jid, tgt, tgt_type, listen=True, **kwargs): ''' Return the information about a given job ''' log.debug('Checking whether jid %s is still running', jid) timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) pub_data = self.run_job(tgt, 'saltutil.find_job', arg=[jid], tgt_type=tgt_type, timeout=timeout, listen=listen, **kwargs ) if 'jid' in pub_data: self.event.subscribe(pub_data['jid']) return pub_data def _check_pub_data(self, pub_data, listen=True): ''' Common checks on the pub_data data structure returned from running pub ''' if pub_data == '': # Failed to authenticate, this could be a bunch of things raise EauthAuthenticationError( 'Failed to authenticate! This is most likely because this ' 'user is not permitted to execute commands, but there is a ' 'small possibility that a disk error occurred (check ' 'disk/inode usage).' 
) # Failed to connect to the master and send the pub if 'error' in pub_data: print(pub_data['error']) log.debug('_check_pub_data() error: %s', pub_data['error']) return {} elif 'jid' not in pub_data: return {} if pub_data['jid'] == '0': print('Failed to connect to the Master, ' 'is the Salt Master running?') return {} # If we order masters (via a syndic), don't short circuit if no minions # are found if not self.opts.get('order_masters'): # Check for no minions if not pub_data['minions']: print('No minions matched the target. ' 'No command was sent, no jid was assigned.') return {} # don't install event subscription listeners when the request is asynchronous # and doesn't care. this is important as it will create event leaks otherwise if not listen: return pub_data if self.opts.get('order_masters'): self.event.subscribe('syndic/.*/{0}'.format(pub_data['jid']), 'regex') self.event.subscribe('salt/job/{0}'.format(pub_data['jid'])) return pub_data def run_job( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=False, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = self.pub( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) return self._check_pub_data(pub_data, listen=listen) def gather_minions(self, tgt, expr_form): _res = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form) return _res['minions'] @tornado.gen.coroutine def run_job_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=True, io_loop=None, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job_async('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = yield self.pub_async( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), io_loop=io_loop, listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) raise tornado.gen.Return(self._check_pub_data(pub_data, listen=listen)) def cmd_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', kwarg=None, **kwargs): ''' Asynchronously send a command to connected minions The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A job ID or 0 on failure. .. code-block:: python >>> local.cmd_async('*', 'test.sleep', [300]) '20131219215921857715' ''' pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, jid=jid, kwarg=kwarg, listen=False, **kwargs) try: return pub_data['jid'] except KeyError: return 0 def cmd_subset( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, sub=3, cli=False, progress=False, full_return=False, **kwargs): ''' Execute a command on a random subset of the targeted systems The function signature is the same as :py:meth:`cmd` with the following exceptions. :param sub: The number of systems to execute on :param cli: When this is set to True, a generator is returned, otherwise a dictionary of the minion returns is returned .. 
code-block:: python >>> SLC.cmd_subset('*', 'test.ping', sub=1) {'jerry': True} ''' minion_ret = self.cmd(tgt, 'sys.list_functions', tgt_type=tgt_type, **kwargs) minions = list(minion_ret) random.shuffle(minions) f_tgt = [] for minion in minions: if fun in minion_ret[minion]: f_tgt.append(minion) if len(f_tgt) >= sub: break func = self.cmd if cli: func = self.cmd_cli return func( f_tgt, fun, arg, tgt_type='list', ret=ret, kwarg=kwarg, progress=progress, full_return=full_return, **kwargs) def cmd_batch( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, batch='10%', **kwargs): ''' Iteratively execute a command on subsets of minions at a time The function signature is the same as :py:meth:`cmd` with the following exceptions. :param batch: The batch identifier of systems to execute on :returns: A generator of minion returns .. code-block:: python >>> returns = local.cmd_batch('*', 'state.highstate', batch='10%') >>> for ret in returns: ... print(ret) {'jerry': {...}} {'dave': {...}} {'stewart': {...}} ''' # Late import - not used anywhere else in this file import salt.cli.batch opts = salt.cli.batch.batch_get_opts( tgt, fun, batch, self.opts, arg=arg, tgt_type=tgt_type, ret=ret, kwarg=kwarg, **kwargs) eauth = salt.cli.batch.batch_get_eauth(kwargs) arg = salt.utils.args.parse_input(arg, kwargs=kwarg) opts = {'tgt': tgt, 'fun': fun, 'arg': arg, 'tgt_type': tgt_type, 'ret': ret, 'batch': batch, 'failhard': kwargs.get('failhard', False), 'raw': kwargs.get('raw', False)} if 'timeout' in kwargs: opts['timeout'] = kwargs['timeout'] if 'gather_job_timeout' in kwargs: opts['gather_job_timeout'] = kwargs['gather_job_timeout'] if 'batch_wait' in kwargs: opts['batch_wait'] = int(kwargs['batch_wait']) eauth = {} if 'eauth' in kwargs: eauth['eauth'] = kwargs.pop('eauth') if 'username' in kwargs: eauth['username'] = kwargs.pop('username') if 'password' in kwargs: eauth['password'] = kwargs.pop('password') if 'token' in kwargs: eauth['token'] = kwargs.pop('token') for key, 
val in six.iteritems(self.opts): if key not in opts: opts[key] = val batch = salt.cli.batch.Batch(opts, eauth=eauth, quiet=True) for ret in batch.run(): yield ret def cmd(self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', jid='', full_return=False, kwarg=None, **kwargs): ''' Synchronously execute a command on targeted minions The cmd method will execute and wait for the timeout period for all minions to reply, then it will return all minion data at once. .. code-block:: python >>> import salt.client >>> local = salt.client.LocalClient() >>> local.cmd('*', 'cmd.run', ['whoami']) {'jerry': 'root'} With extra keyword arguments for the command function to be run: .. code-block:: python local.cmd('*', 'test.arg', ['arg1', 'arg2'], kwarg={'foo': 'bar'}) Compound commands can be used for multiple executions in a single publish. Function names and function arguments are provided in separate lists but the index values must correlate and an empty list must be used if no arguments are required. .. code-block:: python >>> local.cmd('*', [ 'grains.items', 'sys.doc', 'cmd.run', ], [ [], [], ['uptime'], ]) :param tgt: Which minions to target for the execution. Default is shell glob. Modified by the ``tgt_type`` option. :type tgt: string or list :param fun: The module and function to call on the specified minions of the form ``module.function``. For example ``test.ping`` or ``grains.items``. Compound commands Multiple functions may be called in a single publish by passing a list of commands. This can dramatically lower overhead and speed up the application communicating with Salt. This requires that the ``arg`` param is a list of lists. The ``fun`` list and the ``arg`` list must correlate by index meaning a function that does not take arguments must still have a corresponding empty list at the expected index. :type fun: string or list of strings :param arg: A list of arguments to pass to the remote function. 
If the function takes no arguments ``arg`` may be omitted except when executing a compound command. :type arg: list or list-of-lists :param timeout: Seconds to wait after the last minion returns but before all minions return. :param tgt_type: The type of ``tgt``. Allowed values: * ``glob`` - Bash glob completion - Default * ``pcre`` - Perl style regular expression * ``list`` - Python list of hosts * ``grain`` - Match based on a grain comparison * ``grain_pcre`` - Grain comparison with a regex * ``pillar`` - Pillar data comparison * ``pillar_pcre`` - Pillar data comparison with a regex * ``nodegroup`` - Match on nodegroup * ``range`` - Use a Range server for matching * ``compound`` - Pass a compound match string * ``ipcidr`` - Match based on Subnet (CIDR notation) or IPv4 address. .. versionchanged:: 2017.7.0 Renamed from ``expr_form`` to ``tgt_type`` :param ret: The returner to use. The value passed can be single returner, or a comma delimited list of returners to call in order on the minions :param kwarg: A dictionary with keyword arguments for the function. :param full_return: Output the job return only (default) or the full return including exit code and other job metadata. :param kwargs: Optional keyword arguments. Authentication credentials may be passed when using :conf_master:`external_auth`. For example: ``local.cmd('*', 'test.ping', username='saltdev', password='saltdev', eauth='pam')``. Or: ``local.cmd('*', 'test.ping', token='5871821ea51754fdcea8153c1c745433')`` :returns: A dictionary with the result of the execution, keyed by minion ID. A compound command will return a sub-dictionary keyed by function name. 
''' was_listening = self.event.cpub try: pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, timeout, jid, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data ret = {} for fn_ret in self.get_cli_event_returns( pub_data['jid'], pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, **kwargs): if fn_ret: for mid, data in six.iteritems(fn_ret): ret[mid] = (data if full_return else data.get('ret', {})) for failed in list(set(pub_data['minions']) - set(ret)): ret[failed] = False return ret finally: if not was_listening: self.event.close_pub() def cmd_cli( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, progress=False, **kwargs): ''' Used by the :command:`salt` CLI. This method returns minion returns as they come back and attempts to block until all minions return. The function signature is the same as :py:meth:`cmd` with the following exceptions. :param verbose: Print extra information about the running command :returns: A generator ''' was_listening = self.event.cpub if fun.startswith('state.'): ref = {'compound': '-C', 'glob': '', 'grain': '-G', 'grain_pcre': '-P', 'ipcidr': '-S', 'list': '-L', 'nodegroup': '-N', 'pcre': '-E', 'pillar': '-I', 'pillar_pcre': '-J'} if HAS_RANGE: ref['range'] = '-R' if ref[tgt_type].startswith('-'): self.target_data = "{0} '{1}'".format( ref[tgt_type], ','.join(tgt) if isinstance(tgt, list) else tgt) else: self.target_data = ','.join(tgt) if isinstance(tgt, list) else tgt else: self.target_data = '' try: self.pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not self.pub_data: yield self.pub_data else: try: for fn_ret in self.get_cli_event_returns( self.pub_data['jid'], self.pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, verbose, progress, **kwargs): if not fn_ret: continue yield fn_ret except KeyboardInterrupt: exit_msg = ( '\nExiting gracefully on Ctrl-c' '\n' 'This job\'s jid is: {0}\n' 'The 
minions may not have all finished running and any ' 'remaining minions will return upon completion.\n\n' 'To look up the return data for this job later, run the ' 'following command:\n' 'salt-run jobs.lookup_jid {0}'.format(self.pub_data['jid'])) if self.target_data: exit_msg += ( '\n\n' 'To set up the state run to safely exit, run the following command:\n' 'salt {0} state.soft_kill {1}'.format(self.target_data, self.pub_data['jid'])) raise SystemExit(exit_msg) finally: if not was_listening: self.event.close_pub() def cmd_iter( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, **kwargs): ''' Yields the individual minion returns as they come in The function signature is the same as :py:meth:`cmd` with the following exceptions. Normally :py:meth:`cmd_iter` does not yield results for minions that are not connected. If you want it to return results for disconnected minions set `expect_minions=True` in `kwargs`. :return: A generator yielding the individual minion returns .. code-block:: python >>> ret = local.cmd_iter('*', 'test.ping') >>> for i in ret: ... print(i) {'jerry': {'ret': True}} {'dave': {'ret': True}} {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: if kwargs.get('yield_pub_data'): yield pub_data for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=self._get_timeout(timeout), tgt=tgt, tgt_type=tgt_type, **kwargs): if not fn_ret: continue yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_iter_no_block( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, show_jid=False, verbose=False, **kwargs): ''' Yields the individual minion returns as they come in, or None when no returns are available. 
The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A generator yielding the individual minion returns, or None when no returns are available. This allows for actions to be injected in between minion returns. .. code-block:: python >>> ret = local.cmd_iter_no_block('*', 'test.ping') >>> for i in ret: ... print(i) None {'jerry': {'ret': True}} {'dave': {'ret': True}} None {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=timeout, tgt=tgt, tgt_type=tgt_type, block=False, **kwargs): if fn_ret and any([show_jid, verbose]): for minion in fn_ret: fn_ret[minion]['jid'] = pub_data['jid'] yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_full_return( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, **kwargs): ''' Execute a salt command and return ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data return (self.get_cli_static_event_returns(pub_data['jid'], pub_data['minions'], timeout, tgt, tgt_type, verbose)) finally: if not was_listening: self.event.close_pub() def get_cli_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_jid=False, **kwargs): ''' Starts a watcher looking at the return data for a specified JID :returns: all of the information for the JID ''' if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] fret = {} # make sure the minions is a set (since we do set operations on it) 
minions = set(minions) found = set() # start this before the cache lookup-- in case new stuff comes in event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) # get the info from the cache ret = self.get_cache_returns(jid) if ret != {}: found.update(set(ret)) yield ret # if you have all the returns, stop if len(found.intersection(minions)) >= len(minions): raise StopIteration() # otherwise, get them from the event system for event in event_iter: if event != {}: found.update(set(event)) yield event if len(found.intersection(minions)) >= len(minions): self._clean_up_subscriptions(jid) raise StopIteration() # TODO: tests!! def get_returns_no_block( self, tag, match_type=None): ''' Raw function to just return events of jid excluding timeout logic Yield either the raw event data or None Pass a list of additional regular expressions as `tags_regex` to search the event bus for non-return data, such as minion lists returned from syndics. ''' while True: raw = self.event.get_event(wait=0.01, tag=tag, match_type=match_type, full=True, no_block=True, auto_reconnect=self.auto_reconnect) yield raw def get_returns( self, jid, minions, timeout=None): ''' Get the returns for the command line interface via the event system ''' minions = set(minions) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout log.debug( 'get_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Master job cache returner [{0}] failed to verify jid. 
def get_full_returns(self, jid, minions, timeout=None):
    '''
    Watch the return data for a specified jid and collect everything.

    Combines whatever is already in the master job cache with returns
    arriving on the event bus, returning a dict that maps each minion id
    to its job data.

    :param jid: the job id to collect returns for
    :param minions: iterable of minion ids expected to return
    :param timeout: seconds to wait on the event iterator; ``None`` uses
        the configured default inside ``get_event_iter_returns``
    :return: dict of ``{minion_id: {'ret': ..., 'out': ...}}``
    :raises SaltClientError: if the master job cache returner fails
    '''
    # TODO: change this from ret to return... or the other way.
    # Its inconsistent, we should pick one
    ret = {}
    # Create the event iterator before reading the cache so returns that
    # arrive while we read the job cache are not missed.
    event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout)

    try:
        data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid)
    except Exception as exc:
        raise SaltClientError('Returner {0} could not fetch jid data. '
                              'Exception details: {1}'.format(
                                  self.opts['master_job_cache'],
                                  exc))
    for minion in data:
        m_data = {}
        # The old if/else here assigned the identical expression in both
        # branches; a single .get() is equivalent (missing key -> None).
        m_data['ret'] = data[minion].get('return')
        if 'out' in data[minion]:
            m_data['out'] = data[minion]['out']
        if minion in ret:
            ret[minion].update(m_data)
        else:
            ret[minion] = m_data

    # If the cache already holds every expected minion, we are done.
    if len(set(ret).intersection(minions)) >= len(minions):
        return ret

    # Otherwise consume the event listener to pick up the stragglers.
    for event_ret in event_iter:
        # Nothing new yet; back off briefly before polling again.
        if event_ret == {}:
            time.sleep(0.02)
            continue
        for minion, m_data in six.iteritems(event_ret):
            if minion in ret:
                ret[minion].update(m_data)
            else:
                ret[minion] = m_data

        # Stop as soon as every expected minion has reported in.
        if len(set(ret).intersection(minions)) >= len(minions):
            return ret
    # Timeout reached; return whatever was collected.
    return ret
def get_cli_static_event_returns(
        self,
        jid,
        minions,
        timeout=None,
        tgt='*',
        tgt_type='glob',
        verbose=False,
        show_timeout=False,
        show_jid=False):
    '''
    Get the returns for the command line interface via the event system.

    Unlike the iterator variants, this blocks and collects everything
    into a single dict, returning only when all minions have answered
    or the timeout has expired.

    :param jid: job id to collect returns for
    :param minions: iterable of minion ids expected to return; the set
        may grow if an event carries an expanded minion list
    :param timeout: seconds to wait; ``None`` uses the configured timeout
    :param tgt: original target expression (unused here beyond signature
        compatibility with the sibling return-gathering methods)
    :param tgt_type: target type of ``tgt``; controls whether missing
        minions are reported on timeout
    :param verbose: print the jid banner and report non-returning minions
    :param show_timeout: report non-returning minions without the banner
    :param show_jid: print just the jid line
    :return: dict mapping minion id to ``{'ret': ..., 'success': ...}``
        (plus ``'out'`` when present in the event)
    :raises SaltClientError: if the master job cache cannot be queried
    '''
    log.trace('entered - function get_cli_static_event_returns()')
    minions = set(minions)

    if verbose:
        msg = 'Executing job with jid {0}'.format(jid)
        print(msg)
        print('-' * len(msg) + '\n')
    elif show_jid:
        print('jid: {0}'.format(jid))

    if timeout is None:
        timeout = self.opts['timeout']

    start = int(time.time())
    timeout_at = start + timeout
    found = set()
    ret = {}
    # Check to see if the jid is real, if not return the empty dict
    try:
        if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}:
            log.warning('jid does not exist')
            return ret
    except Exception as exc:
        raise SaltClientError('Load could not be retrieved from '
                              'returner {0}. Exception details: {1}'.format(
                                  self.opts['master_job_cache'],
                                  exc))
    # Wait for the hosts to check in
    while True:
        # Process events until timeout is reached or all minions have returned
        time_left = timeout_at - int(time.time())
        # Wait 0 == forever, use a minimum of 1s
        wait = max(1, time_left)
        jid_tag = 'salt/job/{0}'.format(jid)
        raw = self.event.get_event(wait, jid_tag, auto_reconnect=self.auto_reconnect)
        if raw is not None and 'return' in raw:
            if 'minions' in raw.get('data', {}):
                # A syndic (or the master) published an updated minion
                # list for this job -- widen the expected set and keep
                # waiting rather than treating this as a return.
                minions.update(raw['data']['minions'])
                continue
            found.add(raw['id'])
            ret[raw['id']] = {'ret': raw['return']}
            # NOTE(review): a missing 'success' key is recorded as False;
            # presumably older minions omit it -- confirm before relying
            # on 'success' for error detection.
            ret[raw['id']]['success'] = raw.get('success', False)
            if 'out' in raw:
                ret[raw['id']]['out'] = raw['out']
            if len(found.intersection(minions)) >= len(minions):
                # All minions have returned, break out of the loop
                break
            continue
        # Then event system timeout was reached and nothing was returned
        if len(found.intersection(minions)) >= len(minions):
            # All minions have returned, break out of the loop
            break
        if int(time.time()) > timeout_at:
            # Timed out: optionally synthesize 'no_return' entries for
            # minions that never answered, but only for target types
            # where the expected-minion set is trustworthy.
            if verbose or show_timeout:
                if self.opts.get('minion_data_cache', False) \
                        or tgt_type in ('glob', 'pcre', 'list'):
                    if len(found) < len(minions):
                        fail = sorted(list(minions.difference(found)))
                        for minion in fail:
                            ret[minion] = {
                                'out': 'no_return',
                                'ret': 'Minion did not return'
                            }
            break
        # Brief pause so the poll loop does not spin at full speed.
        time.sleep(0.01)

    self._clean_up_subscriptions(jid)
    return ret
def get_event_iter_returns(self, jid, minions, timeout=None):
    '''
    Gather the return data from the event system, break hard when
    timeout is reached.

    :param jid: job id whose returns should be gathered
    :param minions: iterable of expected minion ids (kept for signature
        compatibility; matching here is purely event driven)
    :param timeout: seconds to wait per event poll; ``None`` uses the
        configured client timeout
    :return: generator yielding ``{minion_id: {'ret': ...}}`` chunks
        (plus ``'out'`` when present in the event)
    '''
    log.trace('entered - function get_event_iter_returns()')
    if timeout is None:
        timeout = self.opts['timeout']

    timeout_at = time.time() + timeout

    # Check to see if the jid is real, if not return the empty dict
    if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}:
        log.warning('jid does not exist')
        yield {}
        # Stop the iteration, since the jid is invalid. This must be a
        # bare return: raising StopIteration inside a generator is
        # converted to RuntimeError on Python 3.7+ (PEP 479).
        return
    # Wait for the hosts to check in
    while True:
        raw = self.event.get_event(timeout, auto_reconnect=self.auto_reconnect)
        if raw is None or time.time() > timeout_at:
            # Timeout reached
            break
        if 'minions' in raw.get('data', {}):
            # Minion-list announcements carry no return payload; skip.
            continue
        try:
            # (The previous revision also accumulated raw['id'] into a
            # 'found' set here, but nothing ever read it -- removed.)
            ret = {raw['id']: {'ret': raw['return']}}
        except KeyError:
            # Ignore other erroneous messages
            continue
        if 'out' in raw:
            ret[raw['id']]['out'] = raw['out']
        yield ret
        # Small pause between yields to avoid hammering the event bus.
        time.sleep(0.02)
if kwargs: payload_kwargs['kwargs'] = kwargs # If we have a salt user, add it to the payload if self.opts['syndic_master'] and 'user' in kwargs: payload_kwargs['user'] = kwargs['user'] elif self.salt_user: payload_kwargs['user'] = self.salt_user # If we're a syndication master, pass the timeout if self.opts['order_masters']: payload_kwargs['to'] = timeout return payload_kwargs def pub(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, listen=False, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.ReqChannel.factory(self.opts, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. 
# If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError as err: log.error(err) raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: return payload self.key = key payload_kwargs['key'] = self.key payload = channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: return payload # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() return {'jid': payload['load']['jid'], 'minions': payload['load']['minions']} @tornado.gen.coroutine def pub_async(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, io_loop=None, listen=True, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. 
fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.AsyncReqChannel.factory(self.opts, io_loop=io_loop, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. # If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = yield channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError: raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! 
def __del__(self):
    '''
    Drop the event interface so its worker threads are torn down.

    This IS really necessary: when running tests, if the event object
    is not destroyed we leak two threads per test case that uses the
    client. Deleting ``self.event`` triggers its destructor, which in
    turn takes care of calling ``self.event.destroy()``.
    '''
    if hasattr(self, 'event'):
        del self.event

def _clean_up_subscriptions(self, job_id):
    '''
    Unsubscribe the event tags that were registered for *job_id*.
    '''
    bus = self.event
    # Syndic subscriptions only exist when this master orders others
    if self.opts.get('order_masters'):
        bus.unsubscribe('syndic/.*/{0}'.format(job_id), 'regex')
    bus.unsubscribe('salt/job/{0}'.format(job_id))
saltstack/salt
salt/client/__init__.py
LocalClient.get_returns
python
def get_returns(
        self,
        jid,
        minions,
        timeout=None):
    '''
    Get the returns for the command line interface via the event system.

    Blocks until every expected minion has returned or the timeout is
    reached, then returns whatever was collected.

    :param jid: job id to collect returns for
    :param minions: iterable of minion ids expected to return
    :param timeout: seconds to wait; ``None`` uses the configured timeout
    :return: dict mapping minion id to that minion's raw return value
    :raises SaltClientError: if the master job cache cannot verify the jid
    '''
    minions = set(minions)
    if timeout is None:
        timeout = self.opts['timeout']
    start = int(time.time())
    timeout_at = start + timeout
    log.debug(
        'get_returns for jid %s sent to %s will timeout at %s',
        jid, minions, datetime.fromtimestamp(timeout_at).time()
    )

    found = set()
    ret = {}
    # Check to see if the jid is real, if not return the empty dict
    try:
        if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}:
            log.warning('jid does not exist')
            return ret
    except Exception as exc:
        raise SaltClientError('Master job cache returner [{0}] failed to verify jid. '
                              'Exception details: {1}'.format(self.opts['master_job_cache'], exc))

    # Wait for the hosts to check in
    while True:
        # Re-derive the remaining wait each pass so the deadline is
        # absolute; a minimum of 1s keeps get_event from waiting forever
        # (wait == 0 means "forever" for the event interface).
        time_left = timeout_at - int(time.time())
        wait = max(1, time_left)
        raw = self.event.get_event(wait, jid, auto_reconnect=self.auto_reconnect)
        if raw is not None and 'return' in raw:
            found.add(raw['id'])
            ret[raw['id']] = raw['return']
            if len(found.intersection(minions)) >= len(minions):
                # All minions have returned, break out of the loop
                log.debug('jid %s found all minions', jid)
                break
            continue
        # Then event system timeout was reached and nothing was returned
        if len(found.intersection(minions)) >= len(minions):
            # All minions have returned, break out of the loop
            log.debug('jid %s found all minions', jid)
            break
        if int(time.time()) > timeout_at:
            # Deadline passed with minions still missing; give up
            log.info(
                'jid %s minions %s did not return in time',
                jid, (minions - found)
            )
            break
        # Brief pause so the poll loop does not spin at full speed
        time.sleep(0.01)
    return ret
Get the returns for the command line interface via the event system
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L1294-L1348
null
class LocalClient(object): ''' The interface used by the :command:`salt` CLI tool on the Salt Master ``LocalClient`` is used to send a command to Salt minions to execute :ref:`execution modules <all-salt.modules>` and return the results to the Salt Master. Importing and using ``LocalClient`` must be done on the same machine as the Salt Master and it must be done using the same user that the Salt Master is running as. (Unless :conf_master:`external_auth` is configured and authentication credentials are included in the execution). .. note:: The LocalClient uses a Tornado IOLoop, this can create issues when using the LocalClient inside an existing IOLoop. If creating the LocalClient in partnership with another IOLoop either create the IOLoop before creating the LocalClient, or when creating the IOLoop use ioloop.current() which will return the ioloop created by LocalClient. .. code-block:: python import salt.client local = salt.client.LocalClient() local.cmd('*', 'test.fib', [10]) ''' def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, 'master'), mopts=None, skip_perm_errors=False, io_loop=None, keep_loop=False, auto_reconnect=False): ''' :param IOLoop io_loop: io_loop used for events. Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. 
''' if mopts: self.opts = mopts else: if os.path.isdir(c_path): log.warning( '%s expects a file path not a directory path(%s) to ' 'its \'c_path\' keyword argument', self.__class__.__name__, c_path ) self.opts = salt.config.client_config(c_path) self.serial = salt.payload.Serial(self.opts) self.salt_user = salt.utils.user.get_specific_user() self.skip_perm_errors = skip_perm_errors self.key = self.__read_master_key() self.auto_reconnect = auto_reconnect self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False, io_loop=io_loop, keep_loop=keep_loop) self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils) self.returners = salt.loader.returners(self.opts, self.functions) def __read_master_key(self): ''' Read in the rotating master authentication key ''' key_user = self.salt_user if key_user == 'root': if self.opts.get('user', 'root') != 'root': key_user = self.opts.get('user', 'root') if key_user.startswith('sudo_'): key_user = self.opts.get('user', 'root') if salt.utils.platform.is_windows(): # The username may contain '\' if it is in Windows # 'DOMAIN\username' format. Fix this for the keyfile path. 
def _convert_range_to_list(self, tgt):
    '''
    Expand a seco.range expression into an explicit list of targets.

    On a range-server error the exception is printed and an empty list
    is returned.
    '''
    resolver = seco.range.Range(self.opts['range_server'])
    try:
        return resolver.expand(tgt)
    except seco.range.RangeException as err:
        print('Range server exception: {0}'.format(err))
        return []

def _get_timeout(self, timeout):
    '''
    Normalize *timeout* to the number of seconds to wait.

    ``None`` falls back to the configured default, integers pass
    straight through, numeric strings are converted, and anything else
    (including unparsable strings) uses the configured default.
    '''
    if timeout is None:
        return self.opts['timeout']
    if isinstance(timeout, int):
        return timeout
    if isinstance(timeout, six.string_types):
        try:
            return int(timeout)
        except ValueError:
            pass
    # Unrecognized or unparsable value -- use the configured timeout
    return self.opts['timeout']

def gather_job_info(self, jid, tgt, tgt_type, listen=True, **kwargs):
    '''
    Publish a ``saltutil.find_job`` for *jid* and return the pub data.

    Subscribes to the new job's tag when a jid was assigned, so the
    find_job responses can be collected.
    '''
    log.debug('Checking whether jid %s is still running', jid)
    timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout']))

    pub_data = self.run_job(
        tgt,
        'saltutil.find_job',
        arg=[jid],
        tgt_type=tgt_type,
        timeout=timeout,
        listen=listen,
        **kwargs
    )

    if 'jid' in pub_data:
        self.event.subscribe(pub_data['jid'])

    return pub_data
) # Failed to connect to the master and send the pub if 'error' in pub_data: print(pub_data['error']) log.debug('_check_pub_data() error: %s', pub_data['error']) return {} elif 'jid' not in pub_data: return {} if pub_data['jid'] == '0': print('Failed to connect to the Master, ' 'is the Salt Master running?') return {} # If we order masters (via a syndic), don't short circuit if no minions # are found if not self.opts.get('order_masters'): # Check for no minions if not pub_data['minions']: print('No minions matched the target. ' 'No command was sent, no jid was assigned.') return {} # don't install event subscription listeners when the request is asynchronous # and doesn't care. this is important as it will create event leaks otherwise if not listen: return pub_data if self.opts.get('order_masters'): self.event.subscribe('syndic/.*/{0}'.format(pub_data['jid']), 'regex') self.event.subscribe('salt/job/{0}'.format(pub_data['jid'])) return pub_data def run_job( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=False, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = self.pub( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) return self._check_pub_data(pub_data, listen=listen) def gather_minions(self, tgt, expr_form): _res = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form) return _res['minions'] @tornado.gen.coroutine def run_job_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=True, io_loop=None, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job_async('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = yield self.pub_async( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), io_loop=io_loop, listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) raise tornado.gen.Return(self._check_pub_data(pub_data, listen=listen)) def cmd_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', kwarg=None, **kwargs): ''' Asynchronously send a command to connected minions The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A job ID or 0 on failure. .. code-block:: python >>> local.cmd_async('*', 'test.sleep', [300]) '20131219215921857715' ''' pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, jid=jid, kwarg=kwarg, listen=False, **kwargs) try: return pub_data['jid'] except KeyError: return 0 def cmd_subset( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, sub=3, cli=False, progress=False, full_return=False, **kwargs): ''' Execute a command on a random subset of the targeted systems The function signature is the same as :py:meth:`cmd` with the following exceptions. :param sub: The number of systems to execute on :param cli: When this is set to True, a generator is returned, otherwise a dictionary of the minion returns is returned .. 
code-block:: python >>> SLC.cmd_subset('*', 'test.ping', sub=1) {'jerry': True} ''' minion_ret = self.cmd(tgt, 'sys.list_functions', tgt_type=tgt_type, **kwargs) minions = list(minion_ret) random.shuffle(minions) f_tgt = [] for minion in minions: if fun in minion_ret[minion]: f_tgt.append(minion) if len(f_tgt) >= sub: break func = self.cmd if cli: func = self.cmd_cli return func( f_tgt, fun, arg, tgt_type='list', ret=ret, kwarg=kwarg, progress=progress, full_return=full_return, **kwargs) def cmd_batch( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, batch='10%', **kwargs): ''' Iteratively execute a command on subsets of minions at a time The function signature is the same as :py:meth:`cmd` with the following exceptions. :param batch: The batch identifier of systems to execute on :returns: A generator of minion returns .. code-block:: python >>> returns = local.cmd_batch('*', 'state.highstate', batch='10%') >>> for ret in returns: ... print(ret) {'jerry': {...}} {'dave': {...}} {'stewart': {...}} ''' # Late import - not used anywhere else in this file import salt.cli.batch opts = salt.cli.batch.batch_get_opts( tgt, fun, batch, self.opts, arg=arg, tgt_type=tgt_type, ret=ret, kwarg=kwarg, **kwargs) eauth = salt.cli.batch.batch_get_eauth(kwargs) arg = salt.utils.args.parse_input(arg, kwargs=kwarg) opts = {'tgt': tgt, 'fun': fun, 'arg': arg, 'tgt_type': tgt_type, 'ret': ret, 'batch': batch, 'failhard': kwargs.get('failhard', False), 'raw': kwargs.get('raw', False)} if 'timeout' in kwargs: opts['timeout'] = kwargs['timeout'] if 'gather_job_timeout' in kwargs: opts['gather_job_timeout'] = kwargs['gather_job_timeout'] if 'batch_wait' in kwargs: opts['batch_wait'] = int(kwargs['batch_wait']) eauth = {} if 'eauth' in kwargs: eauth['eauth'] = kwargs.pop('eauth') if 'username' in kwargs: eauth['username'] = kwargs.pop('username') if 'password' in kwargs: eauth['password'] = kwargs.pop('password') if 'token' in kwargs: eauth['token'] = kwargs.pop('token') for key, 
val in six.iteritems(self.opts): if key not in opts: opts[key] = val batch = salt.cli.batch.Batch(opts, eauth=eauth, quiet=True) for ret in batch.run(): yield ret def cmd(self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', jid='', full_return=False, kwarg=None, **kwargs): ''' Synchronously execute a command on targeted minions The cmd method will execute and wait for the timeout period for all minions to reply, then it will return all minion data at once. .. code-block:: python >>> import salt.client >>> local = salt.client.LocalClient() >>> local.cmd('*', 'cmd.run', ['whoami']) {'jerry': 'root'} With extra keyword arguments for the command function to be run: .. code-block:: python local.cmd('*', 'test.arg', ['arg1', 'arg2'], kwarg={'foo': 'bar'}) Compound commands can be used for multiple executions in a single publish. Function names and function arguments are provided in separate lists but the index values must correlate and an empty list must be used if no arguments are required. .. code-block:: python >>> local.cmd('*', [ 'grains.items', 'sys.doc', 'cmd.run', ], [ [], [], ['uptime'], ]) :param tgt: Which minions to target for the execution. Default is shell glob. Modified by the ``tgt_type`` option. :type tgt: string or list :param fun: The module and function to call on the specified minions of the form ``module.function``. For example ``test.ping`` or ``grains.items``. Compound commands Multiple functions may be called in a single publish by passing a list of commands. This can dramatically lower overhead and speed up the application communicating with Salt. This requires that the ``arg`` param is a list of lists. The ``fun`` list and the ``arg`` list must correlate by index meaning a function that does not take arguments must still have a corresponding empty list at the expected index. :type fun: string or list of strings :param arg: A list of arguments to pass to the remote function. 
If the function takes no arguments ``arg`` may be omitted except when executing a compound command. :type arg: list or list-of-lists :param timeout: Seconds to wait after the last minion returns but before all minions return. :param tgt_type: The type of ``tgt``. Allowed values: * ``glob`` - Bash glob completion - Default * ``pcre`` - Perl style regular expression * ``list`` - Python list of hosts * ``grain`` - Match based on a grain comparison * ``grain_pcre`` - Grain comparison with a regex * ``pillar`` - Pillar data comparison * ``pillar_pcre`` - Pillar data comparison with a regex * ``nodegroup`` - Match on nodegroup * ``range`` - Use a Range server for matching * ``compound`` - Pass a compound match string * ``ipcidr`` - Match based on Subnet (CIDR notation) or IPv4 address. .. versionchanged:: 2017.7.0 Renamed from ``expr_form`` to ``tgt_type`` :param ret: The returner to use. The value passed can be single returner, or a comma delimited list of returners to call in order on the minions :param kwarg: A dictionary with keyword arguments for the function. :param full_return: Output the job return only (default) or the full return including exit code and other job metadata. :param kwargs: Optional keyword arguments. Authentication credentials may be passed when using :conf_master:`external_auth`. For example: ``local.cmd('*', 'test.ping', username='saltdev', password='saltdev', eauth='pam')``. Or: ``local.cmd('*', 'test.ping', token='5871821ea51754fdcea8153c1c745433')`` :returns: A dictionary with the result of the execution, keyed by minion ID. A compound command will return a sub-dictionary keyed by function name. 
''' was_listening = self.event.cpub try: pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, timeout, jid, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data ret = {} for fn_ret in self.get_cli_event_returns( pub_data['jid'], pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, **kwargs): if fn_ret: for mid, data in six.iteritems(fn_ret): ret[mid] = (data if full_return else data.get('ret', {})) for failed in list(set(pub_data['minions']) - set(ret)): ret[failed] = False return ret finally: if not was_listening: self.event.close_pub() def cmd_cli( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, progress=False, **kwargs): ''' Used by the :command:`salt` CLI. This method returns minion returns as they come back and attempts to block until all minions return. The function signature is the same as :py:meth:`cmd` with the following exceptions. :param verbose: Print extra information about the running command :returns: A generator ''' was_listening = self.event.cpub if fun.startswith('state.'): ref = {'compound': '-C', 'glob': '', 'grain': '-G', 'grain_pcre': '-P', 'ipcidr': '-S', 'list': '-L', 'nodegroup': '-N', 'pcre': '-E', 'pillar': '-I', 'pillar_pcre': '-J'} if HAS_RANGE: ref['range'] = '-R' if ref[tgt_type].startswith('-'): self.target_data = "{0} '{1}'".format( ref[tgt_type], ','.join(tgt) if isinstance(tgt, list) else tgt) else: self.target_data = ','.join(tgt) if isinstance(tgt, list) else tgt else: self.target_data = '' try: self.pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not self.pub_data: yield self.pub_data else: try: for fn_ret in self.get_cli_event_returns( self.pub_data['jid'], self.pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, verbose, progress, **kwargs): if not fn_ret: continue yield fn_ret except KeyboardInterrupt: exit_msg = ( '\nExiting gracefully on Ctrl-c' '\n' 'This job\'s jid is: {0}\n' 'The 
minions may not have all finished running and any ' 'remaining minions will return upon completion.\n\n' 'To look up the return data for this job later, run the ' 'following command:\n' 'salt-run jobs.lookup_jid {0}'.format(self.pub_data['jid'])) if self.target_data: exit_msg += ( '\n\n' 'To set up the state run to safely exit, run the following command:\n' 'salt {0} state.soft_kill {1}'.format(self.target_data, self.pub_data['jid'])) raise SystemExit(exit_msg) finally: if not was_listening: self.event.close_pub() def cmd_iter( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, **kwargs): ''' Yields the individual minion returns as they come in The function signature is the same as :py:meth:`cmd` with the following exceptions. Normally :py:meth:`cmd_iter` does not yield results for minions that are not connected. If you want it to return results for disconnected minions set `expect_minions=True` in `kwargs`. :return: A generator yielding the individual minion returns .. code-block:: python >>> ret = local.cmd_iter('*', 'test.ping') >>> for i in ret: ... print(i) {'jerry': {'ret': True}} {'dave': {'ret': True}} {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: if kwargs.get('yield_pub_data'): yield pub_data for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=self._get_timeout(timeout), tgt=tgt, tgt_type=tgt_type, **kwargs): if not fn_ret: continue yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_iter_no_block( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, show_jid=False, verbose=False, **kwargs): ''' Yields the individual minion returns as they come in, or None when no returns are available. 
The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A generator yielding the individual minion returns, or None when no returns are available. This allows for actions to be injected in between minion returns. .. code-block:: python >>> ret = local.cmd_iter_no_block('*', 'test.ping') >>> for i in ret: ... print(i) None {'jerry': {'ret': True}} {'dave': {'ret': True}} None {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=timeout, tgt=tgt, tgt_type=tgt_type, block=False, **kwargs): if fn_ret and any([show_jid, verbose]): for minion in fn_ret: fn_ret[minion]['jid'] = pub_data['jid'] yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_full_return( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, **kwargs): ''' Execute a salt command and return ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data return (self.get_cli_static_event_returns(pub_data['jid'], pub_data['minions'], timeout, tgt, tgt_type, verbose)) finally: if not was_listening: self.event.close_pub() def get_cli_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_jid=False, **kwargs): ''' Starts a watcher looking at the return data for a specified JID :returns: all of the information for the JID ''' if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] fret = {} # make sure the minions is a set (since we do set operations on it) 
minions = set(minions) found = set() # start this before the cache lookup-- in case new stuff comes in event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) # get the info from the cache ret = self.get_cache_returns(jid) if ret != {}: found.update(set(ret)) yield ret # if you have all the returns, stop if len(found.intersection(minions)) >= len(minions): raise StopIteration() # otherwise, get them from the event system for event in event_iter: if event != {}: found.update(set(event)) yield event if len(found.intersection(minions)) >= len(minions): self._clean_up_subscriptions(jid) raise StopIteration() # TODO: tests!! def get_returns_no_block( self, tag, match_type=None): ''' Raw function to just return events of jid excluding timeout logic Yield either the raw event data or None Pass a list of additional regular expressions as `tags_regex` to search the event bus for non-return data, such as minion lists returned from syndics. ''' while True: raw = self.event.get_event(wait=0.01, tag=tag, match_type=match_type, full=True, no_block=True, auto_reconnect=self.auto_reconnect) yield raw def get_iter_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', expect_minions=False, block=True, **kwargs): ''' Watch the event system and return job data as it comes in :returns: all of the information for the JID ''' if not isinstance(minions, set): if isinstance(minions, six.string_types): minions = set([minions]) elif isinstance(minions, (list, tuple)): minions = set(list(minions)) if timeout is None: timeout = self.opts['timeout'] gather_job_timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) start = int(time.time()) # timeouts per minion, id_ -> timeout time minion_timeouts = {} found = set() missing = set() # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the 
iteration, since the jid is invalid raise StopIteration() except Exception as exc: log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG) # Wait for the hosts to check in last_time = False # iterator for this job's return if self.opts['order_masters']: # If we are a MoM, we need to gather expected minions from downstreams masters. ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex') else: ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid)) # iterator for the info of this job jinfo_iter = [] # open event jids that need to be un-subscribed from later open_jids = set() timeout_at = time.time() + timeout gather_syndic_wait = time.time() + self.opts['syndic_wait'] # are there still minions running the job out there # start as True so that we ping at least once minions_running = True log.debug( 'get_iter_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) while True: # Process events until timeout is reached or all minions have returned for raw in ret_iter: # if we got None, then there were no events if raw is None: break if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) if 'missing' in raw.get('data', {}): missing.update(raw['data']['missing']) continue if 'return' not in raw['data']: continue if kwargs.get('raw', False): found.add(raw['data']['id']) yield raw else: found.add(raw['data']['id']) ret = {raw['data']['id']: {'ret': raw['data']['return']}} if 'out' in raw['data']: ret[raw['data']['id']]['out'] = raw['data']['out'] if 'retcode' in raw['data']: ret[raw['data']['id']]['retcode'] = raw['data']['retcode'] if 'jid' in raw['data']: ret[raw['data']['id']]['jid'] = raw['data']['jid'] if kwargs.get('_cmd_meta', False): ret[raw['data']['id']].update(raw['data']) log.debug('jid %s return from %s', jid, raw['data']['id']) yield ret # if we have all of the returns (and we aren't a syndic), no need for anything fancy if 
len(found.intersection(minions)) >= len(minions) and not self.opts['order_masters']: # All minions have returned, break out of the loop log.debug('jid %s found all minions %s', jid, found) break elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']: if len(found) >= len(minions) and len(minions) > 0 and time.time() > gather_syndic_wait: # There were some minions to find and we found them # However, this does not imply that *all* masters have yet responded with expected minion lists. # Therefore, continue to wait up to the syndic_wait period (calculated in gather_syndic_wait) to see # if additional lower-level masters deliver their lists of expected # minions. break # If we get here we may not have gathered the minion list yet. Keep waiting # for all lower-level masters to respond with their minion lists # let start the timeouts for all remaining minions for id_ in minions - found: # if we have a new minion in the list, make sure it has a timeout if id_ not in minion_timeouts: minion_timeouts[id_] = time.time() + timeout # if the jinfo has timed out and some minions are still running the job # re-do the ping if time.time() > timeout_at and minions_running: # since this is a new ping, no one has responded yet jinfo = self.gather_job_info(jid, list(minions - found), 'list', **kwargs) minions_running = False # if we weren't assigned any jid that means the master thinks # we have nothing to send if 'jid' not in jinfo: jinfo_iter = [] else: jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid'])) timeout_at = time.time() + gather_job_timeout # if you are a syndic, wait a little longer if self.opts['order_masters']: timeout_at += self.opts.get('syndic_wait', 1) # check for minions that are running the job still for raw in jinfo_iter: # if there are no more events, lets stop waiting for the jinfo if raw is None: break try: if raw['data']['retcode'] > 0: log.error('saltutil returning errors on minion %s', raw['data']['id']) 
minions.remove(raw['data']['id']) break except KeyError as exc: # This is a safe pass. We're just using the try/except to # avoid having to deep-check for keys. missing_key = exc.__str__().strip('\'"') if missing_key == 'retcode': log.debug('retcode missing from client return') else: log.debug( 'Passing on saltutil error. Key \'%s\' missing ' 'from client return. This may be an error in ' 'the client.', missing_key ) # Keep track of the jid events to unsubscribe from later open_jids.add(jinfo['jid']) # TODO: move to a library?? if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue if 'syndic' in raw.get('data', {}): minions.update(raw['syndic']) continue if 'return' not in raw.get('data', {}): continue # if the job isn't running there anymore... don't count if raw['data']['return'] == {}: continue # if the minion throws an exception containing the word "return" # the master will try to handle the string as a dict in the next # step. Check if we have a string, log the issue and continue. 
if isinstance(raw['data']['return'], six.string_types): log.error("unexpected return from minion: %s", raw) continue if 'return' in raw['data']['return'] and \ raw['data']['return']['return'] == {}: continue # if we didn't originally target the minion, lets add it to the list if raw['data']['id'] not in minions: minions.add(raw['data']['id']) # update this minion's timeout, as long as the job is still running minion_timeouts[raw['data']['id']] = time.time() + timeout # a minion returned, so we know its running somewhere minions_running = True # if we have hit gather_job_timeout (after firing the job) AND # if we have hit all minion timeouts, lets call it now = time.time() # if we have finished waiting, and no minions are running the job # then we need to see if each minion has timedout done = (now > timeout_at) and not minions_running if done: # if all minions have timeod out for id_ in minions - found: if now < minion_timeouts[id_]: done = False break if done: break # don't spin if block: time.sleep(0.01) else: yield # If there are any remaining open events, clean them up. if open_jids: for jid in open_jids: self.event.unsubscribe(jid) if expect_minions: for minion in list((minions - found)): yield {minion: {'failed': True}} # Filter out any minions marked as missing for which we received # returns (prevents false events sent due to higher-level masters not # knowing about lower-level minions). missing -= found # Report on missing minions if missing: for minion in missing: yield {minion: {'failed': True}} def get_full_returns(self, jid, minions, timeout=None): ''' This method starts off a watcher looking at the return data for a specified jid, it returns all of the information for the jid ''' # TODO: change this from ret to return... or the other way. 
# Its inconsistent, we should pick one ret = {} # create the iterator-- since we want to get anyone in the middle event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Returner {0} could not fetch jid data. ' 'Exception details: {1}'.format( self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # if we have all the minion returns, lets just return if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise lets use the listener we created above to get the rest for event_ret in event_iter: # if nothing in the event_ret, skip if event_ret == {}: time.sleep(0.02) continue for minion, m_data in six.iteritems(event_ret): if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # are we done yet? if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise we hit the timeout, return what we have return ret def get_cache_returns(self, jid): ''' Execute a single pass to gather the contents of the job cache ''' ret = {} try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Could not examine master job cache. ' 'Error occurred in {0} returner. 
' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data return ret def get_cli_static_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_timeout=False, show_jid=False): ''' Get the returns for the command line interface via the event system ''' log.trace('entered - function get_cli_static_event_returns()') minions = set(minions) if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Load could not be retrieved from ' 'returner {0}. 
Exception details: {1}'.format( self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: # Process events until timeout is reached or all minions have returned time_left = timeout_at - int(time.time()) # Wait 0 == forever, use a minimum of 1s wait = max(1, time_left) jid_tag = 'salt/job/{0}'.format(jid) raw = self.event.get_event(wait, jid_tag, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue found.add(raw['id']) ret[raw['id']] = {'ret': raw['return']} ret[raw['id']]['success'] = raw.get('success', False) if 'out' in raw: ret[raw['id']]['out'] = raw['out'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break if int(time.time()) > timeout_at: if verbose or show_timeout: if self.opts.get('minion_data_cache', False) \ or tgt_type in ('glob', 'pcre', 'list'): if len(found) < len(minions): fail = sorted(list(minions.difference(found))) for minion in fail: ret[minion] = { 'out': 'no_return', 'ret': 'Minion did not return' } break time.sleep(0.01) self._clean_up_subscriptions(jid) return ret def get_cli_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, progress=False, show_timeout=False, show_jid=False, **kwargs): ''' Get the returns for the command line interface via the event system ''' log.trace('func get_cli_event_returns()') if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) # lazy load the connected minions connected_minions = None return_count = 0 for ret in self.get_iter_returns(jid, minions, timeout=timeout, tgt=tgt, tgt_type=tgt_type, # (gtmanfred) 
expect_minions is popped here incase it is passed from a client # call. If this is not popped, then it would be passed twice to # get_iter_returns. expect_minions=(kwargs.pop('expect_minions', False) or verbose or show_timeout), **kwargs ): log.debug('return event: %s', ret) return_count = return_count + 1 if progress: for id_, min_ret in six.iteritems(ret): if not min_ret.get('failed') is True: yield {'minion_count': len(minions), 'return_count': return_count} # replace the return structure for missing minions for id_, min_ret in six.iteritems(ret): if min_ret.get('failed') is True: if connected_minions is None: connected_minions = salt.utils.minions.CkMinions(self.opts).connected_ids() if self.opts['minion_data_cache'] \ and salt.cache.factory(self.opts).contains('minions/{0}'.format(id_), 'data') \ and connected_minions \ and id_ not in connected_minions: yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [Not connected]', 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: # don't report syndics as unresponsive minions if not os.path.exists(os.path.join(self.opts['syndic_dir'], id_)): yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [No response]' '\nThe minions may not have all finished running and any ' 'remaining minions will return upon completion. To look ' 'up the return data for this job later, run the following ' 'command:\n\n' 'salt-run jobs.lookup_jid {0}'.format(jid), 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: yield {id_: min_ret} self._clean_up_subscriptions(jid) def get_event_iter_returns(self, jid, minions, timeout=None): ''' Gather the return data from the event system, break hard when timeout is reached. 
''' log.trace('entered - function get_event_iter_returns()') if timeout is None: timeout = self.opts['timeout'] timeout_at = time.time() + timeout found = set() # Check to see if the jid is real, if not return the empty dict if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() # Wait for the hosts to check in while True: raw = self.event.get_event(timeout, auto_reconnect=self.auto_reconnect) if raw is None or time.time() > timeout_at: # Timeout reached break if 'minions' in raw.get('data', {}): continue try: found.add(raw['id']) ret = {raw['id']: {'ret': raw['return']}} except KeyError: # Ignore other erroneous messages continue if 'out' in raw: ret[raw['id']]['out'] = raw['out'] yield ret time.sleep(0.02) def _prep_pub(self, tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs): ''' Set up the payload_kwargs to be sent down to the master ''' if tgt_type == 'nodegroup': if tgt not in self.opts['nodegroups']: conf_file = self.opts.get( 'conf_file', 'the master config file' ) raise SaltInvocationError( 'Node group {0} unavailable in {1}'.format( tgt, conf_file ) ) tgt = salt.utils.minions.nodegroup_comp(tgt, self.opts['nodegroups']) tgt_type = 'compound' # Convert a range expression to a list of nodes and change expression # form to list if tgt_type == 'range' and HAS_RANGE: tgt = self._convert_range_to_list(tgt) tgt_type = 'list' # If an external job cache is specified add it to the ret list if self.opts.get('ext_job_cache'): if ret: ret += ',{0}'.format(self.opts['ext_job_cache']) else: ret = self.opts['ext_job_cache'] # format the payload - make a function that does this in the payload # module # Generate the standard keyword args to feed to format_payload payload_kwargs = {'cmd': 'publish', 'tgt': tgt, 'fun': fun, 'arg': arg, 'key': self.key, 'tgt_type': tgt_type, 'ret': ret, 'jid': jid} # if kwargs are passed, pack them. 
if kwargs: payload_kwargs['kwargs'] = kwargs # If we have a salt user, add it to the payload if self.opts['syndic_master'] and 'user' in kwargs: payload_kwargs['user'] = kwargs['user'] elif self.salt_user: payload_kwargs['user'] = self.salt_user # If we're a syndication master, pass the timeout if self.opts['order_masters']: payload_kwargs['to'] = timeout return payload_kwargs def pub(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, listen=False, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.ReqChannel.factory(self.opts, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. 
# If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError as err: log.error(err) raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: return payload self.key = key payload_kwargs['key'] = self.key payload = channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: return payload # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() return {'jid': payload['load']['jid'], 'minions': payload['load']['minions']} @tornado.gen.coroutine def pub_async(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, io_loop=None, listen=True, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. 
fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.AsyncReqChannel.factory(self.opts, io_loop=io_loop, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. # If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = yield channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError: raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! 
Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: raise tornado.gen.Return(payload) self.key = key payload_kwargs['key'] = self.key payload = yield channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: raise tornado.gen.Return(payload) # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() raise tornado.gen.Return({'jid': payload['load']['jid'], 'minions': payload['load']['minions']}) def __del__(self): # This IS really necessary! # When running tests, if self.events is not destroyed, we leak 2 # threads per test case which uses self.client if hasattr(self, 'event'): # The call below will take care of calling 'self.event.destroy()' del self.event def _clean_up_subscriptions(self, job_id): if self.opts.get('order_masters'): self.event.unsubscribe('syndic/.*/{0}'.format(job_id), 'regex') self.event.unsubscribe('salt/job/{0}'.format(job_id))
saltstack/salt
salt/client/__init__.py
LocalClient.get_full_returns
python
def get_full_returns(self, jid, minions, timeout=None): ''' This method starts off a watcher looking at the return data for a specified jid, it returns all of the information for the jid ''' # TODO: change this from ret to return... or the other way. # Its inconsistent, we should pick one ret = {} # create the iterator-- since we want to get anyone in the middle event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Returner {0} could not fetch jid data. ' 'Exception details: {1}'.format( self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # if we have all the minion returns, lets just return if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise lets use the listener we created above to get the rest for event_ret in event_iter: # if nothing in the event_ret, skip if event_ret == {}: time.sleep(0.02) continue for minion, m_data in six.iteritems(event_ret): if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # are we done yet? if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise we hit the timeout, return what we have return ret
This method starts off a watcher looking at the return data for a specified jid, it returns all of the information for the jid
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L1350-L1403
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n", "def get_event_iter_returns(self, jid, minions, timeout=None):\n '''\n Gather the return data from the event system, break hard when timeout\n is reached.\n '''\n log.trace('entered - function get_event_iter_returns()')\n if timeout is None:\n timeout = self.opts['timeout']\n\n timeout_at = time.time() + timeout\n\n found = set()\n # Check to see if the jid is real, if not return the empty dict\n if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}:\n log.warning('jid does not exist')\n yield {}\n # stop the iteration, since the jid is invalid\n raise StopIteration()\n # Wait for the hosts to check in\n while True:\n raw = self.event.get_event(timeout, auto_reconnect=self.auto_reconnect)\n if raw is None or time.time() > timeout_at:\n # Timeout reached\n break\n if 'minions' in raw.get('data', {}):\n continue\n try:\n found.add(raw['id'])\n ret = {raw['id']: {'ret': raw['return']}}\n except KeyError:\n # Ignore other erroneous messages\n continue\n if 'out' in raw:\n ret[raw['id']]['out'] = raw['out']\n yield ret\n time.sleep(0.02)\n" ]
class LocalClient(object): ''' The interface used by the :command:`salt` CLI tool on the Salt Master ``LocalClient`` is used to send a command to Salt minions to execute :ref:`execution modules <all-salt.modules>` and return the results to the Salt Master. Importing and using ``LocalClient`` must be done on the same machine as the Salt Master and it must be done using the same user that the Salt Master is running as. (Unless :conf_master:`external_auth` is configured and authentication credentials are included in the execution). .. note:: The LocalClient uses a Tornado IOLoop, this can create issues when using the LocalClient inside an existing IOLoop. If creating the LocalClient in partnership with another IOLoop either create the IOLoop before creating the LocalClient, or when creating the IOLoop use ioloop.current() which will return the ioloop created by LocalClient. .. code-block:: python import salt.client local = salt.client.LocalClient() local.cmd('*', 'test.fib', [10]) ''' def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, 'master'), mopts=None, skip_perm_errors=False, io_loop=None, keep_loop=False, auto_reconnect=False): ''' :param IOLoop io_loop: io_loop used for events. Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. 
''' if mopts: self.opts = mopts else: if os.path.isdir(c_path): log.warning( '%s expects a file path not a directory path(%s) to ' 'its \'c_path\' keyword argument', self.__class__.__name__, c_path ) self.opts = salt.config.client_config(c_path) self.serial = salt.payload.Serial(self.opts) self.salt_user = salt.utils.user.get_specific_user() self.skip_perm_errors = skip_perm_errors self.key = self.__read_master_key() self.auto_reconnect = auto_reconnect self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False, io_loop=io_loop, keep_loop=keep_loop) self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils) self.returners = salt.loader.returners(self.opts, self.functions) def __read_master_key(self): ''' Read in the rotating master authentication key ''' key_user = self.salt_user if key_user == 'root': if self.opts.get('user', 'root') != 'root': key_user = self.opts.get('user', 'root') if key_user.startswith('sudo_'): key_user = self.opts.get('user', 'root') if salt.utils.platform.is_windows(): # The username may contain '\' if it is in Windows # 'DOMAIN\username' format. Fix this for the keyfile path. 
key_user = key_user.replace('\\', '_') keyfile = os.path.join(self.opts['cachedir'], '.{0}_key'.format(key_user)) try: # Make sure all key parent directories are accessible salt.utils.verify.check_path_traversal(self.opts['cachedir'], key_user, self.skip_perm_errors) with salt.utils.files.fopen(keyfile, 'r') as key: return salt.utils.stringutils.to_unicode(key.read()) except (OSError, IOError, SaltClientError): # Fall back to eauth return '' def _convert_range_to_list(self, tgt): ''' convert a seco.range range into a list target ''' range_ = seco.range.Range(self.opts['range_server']) try: return range_.expand(tgt) except seco.range.RangeException as err: print('Range server exception: {0}'.format(err)) return [] def _get_timeout(self, timeout): ''' Return the timeout to use ''' if timeout is None: return self.opts['timeout'] if isinstance(timeout, int): return timeout if isinstance(timeout, six.string_types): try: return int(timeout) except ValueError: return self.opts['timeout'] # Looks like the timeout is invalid, use config return self.opts['timeout'] def gather_job_info(self, jid, tgt, tgt_type, listen=True, **kwargs): ''' Return the information about a given job ''' log.debug('Checking whether jid %s is still running', jid) timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) pub_data = self.run_job(tgt, 'saltutil.find_job', arg=[jid], tgt_type=tgt_type, timeout=timeout, listen=listen, **kwargs ) if 'jid' in pub_data: self.event.subscribe(pub_data['jid']) return pub_data def _check_pub_data(self, pub_data, listen=True): ''' Common checks on the pub_data data structure returned from running pub ''' if pub_data == '': # Failed to authenticate, this could be a bunch of things raise EauthAuthenticationError( 'Failed to authenticate! This is most likely because this ' 'user is not permitted to execute commands, but there is a ' 'small possibility that a disk error occurred (check ' 'disk/inode usage).' 
) # Failed to connect to the master and send the pub if 'error' in pub_data: print(pub_data['error']) log.debug('_check_pub_data() error: %s', pub_data['error']) return {} elif 'jid' not in pub_data: return {} if pub_data['jid'] == '0': print('Failed to connect to the Master, ' 'is the Salt Master running?') return {} # If we order masters (via a syndic), don't short circuit if no minions # are found if not self.opts.get('order_masters'): # Check for no minions if not pub_data['minions']: print('No minions matched the target. ' 'No command was sent, no jid was assigned.') return {} # don't install event subscription listeners when the request is asynchronous # and doesn't care. this is important as it will create event leaks otherwise if not listen: return pub_data if self.opts.get('order_masters'): self.event.subscribe('syndic/.*/{0}'.format(pub_data['jid']), 'regex') self.event.subscribe('salt/job/{0}'.format(pub_data['jid'])) return pub_data def run_job( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=False, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = self.pub( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) return self._check_pub_data(pub_data, listen=listen) def gather_minions(self, tgt, expr_form): _res = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form) return _res['minions'] @tornado.gen.coroutine def run_job_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=True, io_loop=None, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job_async('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = yield self.pub_async( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), io_loop=io_loop, listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) raise tornado.gen.Return(self._check_pub_data(pub_data, listen=listen)) def cmd_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', kwarg=None, **kwargs): ''' Asynchronously send a command to connected minions The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A job ID or 0 on failure. .. code-block:: python >>> local.cmd_async('*', 'test.sleep', [300]) '20131219215921857715' ''' pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, jid=jid, kwarg=kwarg, listen=False, **kwargs) try: return pub_data['jid'] except KeyError: return 0 def cmd_subset( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, sub=3, cli=False, progress=False, full_return=False, **kwargs): ''' Execute a command on a random subset of the targeted systems The function signature is the same as :py:meth:`cmd` with the following exceptions. :param sub: The number of systems to execute on :param cli: When this is set to True, a generator is returned, otherwise a dictionary of the minion returns is returned .. 
code-block:: python >>> SLC.cmd_subset('*', 'test.ping', sub=1) {'jerry': True} ''' minion_ret = self.cmd(tgt, 'sys.list_functions', tgt_type=tgt_type, **kwargs) minions = list(minion_ret) random.shuffle(minions) f_tgt = [] for minion in minions: if fun in minion_ret[minion]: f_tgt.append(minion) if len(f_tgt) >= sub: break func = self.cmd if cli: func = self.cmd_cli return func( f_tgt, fun, arg, tgt_type='list', ret=ret, kwarg=kwarg, progress=progress, full_return=full_return, **kwargs) def cmd_batch( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, batch='10%', **kwargs): ''' Iteratively execute a command on subsets of minions at a time The function signature is the same as :py:meth:`cmd` with the following exceptions. :param batch: The batch identifier of systems to execute on :returns: A generator of minion returns .. code-block:: python >>> returns = local.cmd_batch('*', 'state.highstate', batch='10%') >>> for ret in returns: ... print(ret) {'jerry': {...}} {'dave': {...}} {'stewart': {...}} ''' # Late import - not used anywhere else in this file import salt.cli.batch opts = salt.cli.batch.batch_get_opts( tgt, fun, batch, self.opts, arg=arg, tgt_type=tgt_type, ret=ret, kwarg=kwarg, **kwargs) eauth = salt.cli.batch.batch_get_eauth(kwargs) arg = salt.utils.args.parse_input(arg, kwargs=kwarg) opts = {'tgt': tgt, 'fun': fun, 'arg': arg, 'tgt_type': tgt_type, 'ret': ret, 'batch': batch, 'failhard': kwargs.get('failhard', False), 'raw': kwargs.get('raw', False)} if 'timeout' in kwargs: opts['timeout'] = kwargs['timeout'] if 'gather_job_timeout' in kwargs: opts['gather_job_timeout'] = kwargs['gather_job_timeout'] if 'batch_wait' in kwargs: opts['batch_wait'] = int(kwargs['batch_wait']) eauth = {} if 'eauth' in kwargs: eauth['eauth'] = kwargs.pop('eauth') if 'username' in kwargs: eauth['username'] = kwargs.pop('username') if 'password' in kwargs: eauth['password'] = kwargs.pop('password') if 'token' in kwargs: eauth['token'] = kwargs.pop('token') for key, 
val in six.iteritems(self.opts): if key not in opts: opts[key] = val batch = salt.cli.batch.Batch(opts, eauth=eauth, quiet=True) for ret in batch.run(): yield ret def cmd(self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', jid='', full_return=False, kwarg=None, **kwargs): ''' Synchronously execute a command on targeted minions The cmd method will execute and wait for the timeout period for all minions to reply, then it will return all minion data at once. .. code-block:: python >>> import salt.client >>> local = salt.client.LocalClient() >>> local.cmd('*', 'cmd.run', ['whoami']) {'jerry': 'root'} With extra keyword arguments for the command function to be run: .. code-block:: python local.cmd('*', 'test.arg', ['arg1', 'arg2'], kwarg={'foo': 'bar'}) Compound commands can be used for multiple executions in a single publish. Function names and function arguments are provided in separate lists but the index values must correlate and an empty list must be used if no arguments are required. .. code-block:: python >>> local.cmd('*', [ 'grains.items', 'sys.doc', 'cmd.run', ], [ [], [], ['uptime'], ]) :param tgt: Which minions to target for the execution. Default is shell glob. Modified by the ``tgt_type`` option. :type tgt: string or list :param fun: The module and function to call on the specified minions of the form ``module.function``. For example ``test.ping`` or ``grains.items``. Compound commands Multiple functions may be called in a single publish by passing a list of commands. This can dramatically lower overhead and speed up the application communicating with Salt. This requires that the ``arg`` param is a list of lists. The ``fun`` list and the ``arg`` list must correlate by index meaning a function that does not take arguments must still have a corresponding empty list at the expected index. :type fun: string or list of strings :param arg: A list of arguments to pass to the remote function. 
If the function takes no arguments ``arg`` may be omitted except when executing a compound command. :type arg: list or list-of-lists :param timeout: Seconds to wait after the last minion returns but before all minions return. :param tgt_type: The type of ``tgt``. Allowed values: * ``glob`` - Bash glob completion - Default * ``pcre`` - Perl style regular expression * ``list`` - Python list of hosts * ``grain`` - Match based on a grain comparison * ``grain_pcre`` - Grain comparison with a regex * ``pillar`` - Pillar data comparison * ``pillar_pcre`` - Pillar data comparison with a regex * ``nodegroup`` - Match on nodegroup * ``range`` - Use a Range server for matching * ``compound`` - Pass a compound match string * ``ipcidr`` - Match based on Subnet (CIDR notation) or IPv4 address. .. versionchanged:: 2017.7.0 Renamed from ``expr_form`` to ``tgt_type`` :param ret: The returner to use. The value passed can be single returner, or a comma delimited list of returners to call in order on the minions :param kwarg: A dictionary with keyword arguments for the function. :param full_return: Output the job return only (default) or the full return including exit code and other job metadata. :param kwargs: Optional keyword arguments. Authentication credentials may be passed when using :conf_master:`external_auth`. For example: ``local.cmd('*', 'test.ping', username='saltdev', password='saltdev', eauth='pam')``. Or: ``local.cmd('*', 'test.ping', token='5871821ea51754fdcea8153c1c745433')`` :returns: A dictionary with the result of the execution, keyed by minion ID. A compound command will return a sub-dictionary keyed by function name. 
''' was_listening = self.event.cpub try: pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, timeout, jid, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data ret = {} for fn_ret in self.get_cli_event_returns( pub_data['jid'], pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, **kwargs): if fn_ret: for mid, data in six.iteritems(fn_ret): ret[mid] = (data if full_return else data.get('ret', {})) for failed in list(set(pub_data['minions']) - set(ret)): ret[failed] = False return ret finally: if not was_listening: self.event.close_pub() def cmd_cli( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, progress=False, **kwargs): ''' Used by the :command:`salt` CLI. This method returns minion returns as they come back and attempts to block until all minions return. The function signature is the same as :py:meth:`cmd` with the following exceptions. :param verbose: Print extra information about the running command :returns: A generator ''' was_listening = self.event.cpub if fun.startswith('state.'): ref = {'compound': '-C', 'glob': '', 'grain': '-G', 'grain_pcre': '-P', 'ipcidr': '-S', 'list': '-L', 'nodegroup': '-N', 'pcre': '-E', 'pillar': '-I', 'pillar_pcre': '-J'} if HAS_RANGE: ref['range'] = '-R' if ref[tgt_type].startswith('-'): self.target_data = "{0} '{1}'".format( ref[tgt_type], ','.join(tgt) if isinstance(tgt, list) else tgt) else: self.target_data = ','.join(tgt) if isinstance(tgt, list) else tgt else: self.target_data = '' try: self.pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not self.pub_data: yield self.pub_data else: try: for fn_ret in self.get_cli_event_returns( self.pub_data['jid'], self.pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, verbose, progress, **kwargs): if not fn_ret: continue yield fn_ret except KeyboardInterrupt: exit_msg = ( '\nExiting gracefully on Ctrl-c' '\n' 'This job\'s jid is: {0}\n' 'The 
minions may not have all finished running and any ' 'remaining minions will return upon completion.\n\n' 'To look up the return data for this job later, run the ' 'following command:\n' 'salt-run jobs.lookup_jid {0}'.format(self.pub_data['jid'])) if self.target_data: exit_msg += ( '\n\n' 'To set up the state run to safely exit, run the following command:\n' 'salt {0} state.soft_kill {1}'.format(self.target_data, self.pub_data['jid'])) raise SystemExit(exit_msg) finally: if not was_listening: self.event.close_pub() def cmd_iter( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, **kwargs): ''' Yields the individual minion returns as they come in The function signature is the same as :py:meth:`cmd` with the following exceptions. Normally :py:meth:`cmd_iter` does not yield results for minions that are not connected. If you want it to return results for disconnected minions set `expect_minions=True` in `kwargs`. :return: A generator yielding the individual minion returns .. code-block:: python >>> ret = local.cmd_iter('*', 'test.ping') >>> for i in ret: ... print(i) {'jerry': {'ret': True}} {'dave': {'ret': True}} {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: if kwargs.get('yield_pub_data'): yield pub_data for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=self._get_timeout(timeout), tgt=tgt, tgt_type=tgt_type, **kwargs): if not fn_ret: continue yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_iter_no_block( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, show_jid=False, verbose=False, **kwargs): ''' Yields the individual minion returns as they come in, or None when no returns are available. 
The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A generator yielding the individual minion returns, or None when no returns are available. This allows for actions to be injected in between minion returns. .. code-block:: python >>> ret = local.cmd_iter_no_block('*', 'test.ping') >>> for i in ret: ... print(i) None {'jerry': {'ret': True}} {'dave': {'ret': True}} None {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=timeout, tgt=tgt, tgt_type=tgt_type, block=False, **kwargs): if fn_ret and any([show_jid, verbose]): for minion in fn_ret: fn_ret[minion]['jid'] = pub_data['jid'] yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_full_return( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, **kwargs): ''' Execute a salt command and return ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data return (self.get_cli_static_event_returns(pub_data['jid'], pub_data['minions'], timeout, tgt, tgt_type, verbose)) finally: if not was_listening: self.event.close_pub() def get_cli_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_jid=False, **kwargs): ''' Starts a watcher looking at the return data for a specified JID :returns: all of the information for the JID ''' if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] fret = {} # make sure the minions is a set (since we do set operations on it) 
minions = set(minions) found = set() # start this before the cache lookup-- in case new stuff comes in event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) # get the info from the cache ret = self.get_cache_returns(jid) if ret != {}: found.update(set(ret)) yield ret # if you have all the returns, stop if len(found.intersection(minions)) >= len(minions): raise StopIteration() # otherwise, get them from the event system for event in event_iter: if event != {}: found.update(set(event)) yield event if len(found.intersection(minions)) >= len(minions): self._clean_up_subscriptions(jid) raise StopIteration() # TODO: tests!! def get_returns_no_block( self, tag, match_type=None): ''' Raw function to just return events of jid excluding timeout logic Yield either the raw event data or None Pass a list of additional regular expressions as `tags_regex` to search the event bus for non-return data, such as minion lists returned from syndics. ''' while True: raw = self.event.get_event(wait=0.01, tag=tag, match_type=match_type, full=True, no_block=True, auto_reconnect=self.auto_reconnect) yield raw def get_iter_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', expect_minions=False, block=True, **kwargs): ''' Watch the event system and return job data as it comes in :returns: all of the information for the JID ''' if not isinstance(minions, set): if isinstance(minions, six.string_types): minions = set([minions]) elif isinstance(minions, (list, tuple)): minions = set(list(minions)) if timeout is None: timeout = self.opts['timeout'] gather_job_timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) start = int(time.time()) # timeouts per minion, id_ -> timeout time minion_timeouts = {} found = set() missing = set() # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the 
iteration, since the jid is invalid raise StopIteration() except Exception as exc: log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG) # Wait for the hosts to check in last_time = False # iterator for this job's return if self.opts['order_masters']: # If we are a MoM, we need to gather expected minions from downstreams masters. ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex') else: ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid)) # iterator for the info of this job jinfo_iter = [] # open event jids that need to be un-subscribed from later open_jids = set() timeout_at = time.time() + timeout gather_syndic_wait = time.time() + self.opts['syndic_wait'] # are there still minions running the job out there # start as True so that we ping at least once minions_running = True log.debug( 'get_iter_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) while True: # Process events until timeout is reached or all minions have returned for raw in ret_iter: # if we got None, then there were no events if raw is None: break if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) if 'missing' in raw.get('data', {}): missing.update(raw['data']['missing']) continue if 'return' not in raw['data']: continue if kwargs.get('raw', False): found.add(raw['data']['id']) yield raw else: found.add(raw['data']['id']) ret = {raw['data']['id']: {'ret': raw['data']['return']}} if 'out' in raw['data']: ret[raw['data']['id']]['out'] = raw['data']['out'] if 'retcode' in raw['data']: ret[raw['data']['id']]['retcode'] = raw['data']['retcode'] if 'jid' in raw['data']: ret[raw['data']['id']]['jid'] = raw['data']['jid'] if kwargs.get('_cmd_meta', False): ret[raw['data']['id']].update(raw['data']) log.debug('jid %s return from %s', jid, raw['data']['id']) yield ret # if we have all of the returns (and we aren't a syndic), no need for anything fancy if 
len(found.intersection(minions)) >= len(minions) and not self.opts['order_masters']: # All minions have returned, break out of the loop log.debug('jid %s found all minions %s', jid, found) break elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']: if len(found) >= len(minions) and len(minions) > 0 and time.time() > gather_syndic_wait: # There were some minions to find and we found them # However, this does not imply that *all* masters have yet responded with expected minion lists. # Therefore, continue to wait up to the syndic_wait period (calculated in gather_syndic_wait) to see # if additional lower-level masters deliver their lists of expected # minions. break # If we get here we may not have gathered the minion list yet. Keep waiting # for all lower-level masters to respond with their minion lists # let start the timeouts for all remaining minions for id_ in minions - found: # if we have a new minion in the list, make sure it has a timeout if id_ not in minion_timeouts: minion_timeouts[id_] = time.time() + timeout # if the jinfo has timed out and some minions are still running the job # re-do the ping if time.time() > timeout_at and minions_running: # since this is a new ping, no one has responded yet jinfo = self.gather_job_info(jid, list(minions - found), 'list', **kwargs) minions_running = False # if we weren't assigned any jid that means the master thinks # we have nothing to send if 'jid' not in jinfo: jinfo_iter = [] else: jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid'])) timeout_at = time.time() + gather_job_timeout # if you are a syndic, wait a little longer if self.opts['order_masters']: timeout_at += self.opts.get('syndic_wait', 1) # check for minions that are running the job still for raw in jinfo_iter: # if there are no more events, lets stop waiting for the jinfo if raw is None: break try: if raw['data']['retcode'] > 0: log.error('saltutil returning errors on minion %s', raw['data']['id']) 
minions.remove(raw['data']['id']) break except KeyError as exc: # This is a safe pass. We're just using the try/except to # avoid having to deep-check for keys. missing_key = exc.__str__().strip('\'"') if missing_key == 'retcode': log.debug('retcode missing from client return') else: log.debug( 'Passing on saltutil error. Key \'%s\' missing ' 'from client return. This may be an error in ' 'the client.', missing_key ) # Keep track of the jid events to unsubscribe from later open_jids.add(jinfo['jid']) # TODO: move to a library?? if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue if 'syndic' in raw.get('data', {}): minions.update(raw['syndic']) continue if 'return' not in raw.get('data', {}): continue # if the job isn't running there anymore... don't count if raw['data']['return'] == {}: continue # if the minion throws an exception containing the word "return" # the master will try to handle the string as a dict in the next # step. Check if we have a string, log the issue and continue. 
if isinstance(raw['data']['return'], six.string_types): log.error("unexpected return from minion: %s", raw) continue if 'return' in raw['data']['return'] and \ raw['data']['return']['return'] == {}: continue # if we didn't originally target the minion, lets add it to the list if raw['data']['id'] not in minions: minions.add(raw['data']['id']) # update this minion's timeout, as long as the job is still running minion_timeouts[raw['data']['id']] = time.time() + timeout # a minion returned, so we know its running somewhere minions_running = True # if we have hit gather_job_timeout (after firing the job) AND # if we have hit all minion timeouts, lets call it now = time.time() # if we have finished waiting, and no minions are running the job # then we need to see if each minion has timedout done = (now > timeout_at) and not minions_running if done: # if all minions have timeod out for id_ in minions - found: if now < minion_timeouts[id_]: done = False break if done: break # don't spin if block: time.sleep(0.01) else: yield # If there are any remaining open events, clean them up. if open_jids: for jid in open_jids: self.event.unsubscribe(jid) if expect_minions: for minion in list((minions - found)): yield {minion: {'failed': True}} # Filter out any minions marked as missing for which we received # returns (prevents false events sent due to higher-level masters not # knowing about lower-level minions). 
missing -= found # Report on missing minions if missing: for minion in missing: yield {minion: {'failed': True}} def get_returns( self, jid, minions, timeout=None): ''' Get the returns for the command line interface via the event system ''' minions = set(minions) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout log.debug( 'get_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Master job cache returner [{0}] failed to verify jid. ' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: time_left = timeout_at - int(time.time()) wait = max(1, time_left) raw = self.event.get_event(wait, jid, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: found.add(raw['id']) ret[raw['id']] = raw['return'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break if int(time.time()) > timeout_at: log.info( 'jid %s minions %s did not return in time', jid, (minions - found) ) break time.sleep(0.01) return ret def get_cache_returns(self, jid): ''' Execute a single pass to gather the contents of the job cache ''' ret = {} try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Could not examine master job cache. 
' 'Error occurred in {0} returner. ' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data return ret def get_cli_static_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_timeout=False, show_jid=False): ''' Get the returns for the command line interface via the event system ''' log.trace('entered - function get_cli_static_event_returns()') minions = set(minions) if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Load could not be retrieved from ' 'returner {0}. 
Exception details: {1}'.format( self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: # Process events until timeout is reached or all minions have returned time_left = timeout_at - int(time.time()) # Wait 0 == forever, use a minimum of 1s wait = max(1, time_left) jid_tag = 'salt/job/{0}'.format(jid) raw = self.event.get_event(wait, jid_tag, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue found.add(raw['id']) ret[raw['id']] = {'ret': raw['return']} ret[raw['id']]['success'] = raw.get('success', False) if 'out' in raw: ret[raw['id']]['out'] = raw['out'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break if int(time.time()) > timeout_at: if verbose or show_timeout: if self.opts.get('minion_data_cache', False) \ or tgt_type in ('glob', 'pcre', 'list'): if len(found) < len(minions): fail = sorted(list(minions.difference(found))) for minion in fail: ret[minion] = { 'out': 'no_return', 'ret': 'Minion did not return' } break time.sleep(0.01) self._clean_up_subscriptions(jid) return ret def get_cli_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, progress=False, show_timeout=False, show_jid=False, **kwargs): ''' Get the returns for the command line interface via the event system ''' log.trace('func get_cli_event_returns()') if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) # lazy load the connected minions connected_minions = None return_count = 0 for ret in self.get_iter_returns(jid, minions, timeout=timeout, tgt=tgt, tgt_type=tgt_type, # (gtmanfred) 
expect_minions is popped here incase it is passed from a client # call. If this is not popped, then it would be passed twice to # get_iter_returns. expect_minions=(kwargs.pop('expect_minions', False) or verbose or show_timeout), **kwargs ): log.debug('return event: %s', ret) return_count = return_count + 1 if progress: for id_, min_ret in six.iteritems(ret): if not min_ret.get('failed') is True: yield {'minion_count': len(minions), 'return_count': return_count} # replace the return structure for missing minions for id_, min_ret in six.iteritems(ret): if min_ret.get('failed') is True: if connected_minions is None: connected_minions = salt.utils.minions.CkMinions(self.opts).connected_ids() if self.opts['minion_data_cache'] \ and salt.cache.factory(self.opts).contains('minions/{0}'.format(id_), 'data') \ and connected_minions \ and id_ not in connected_minions: yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [Not connected]', 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: # don't report syndics as unresponsive minions if not os.path.exists(os.path.join(self.opts['syndic_dir'], id_)): yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [No response]' '\nThe minions may not have all finished running and any ' 'remaining minions will return upon completion. To look ' 'up the return data for this job later, run the following ' 'command:\n\n' 'salt-run jobs.lookup_jid {0}'.format(jid), 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: yield {id_: min_ret} self._clean_up_subscriptions(jid) def get_event_iter_returns(self, jid, minions, timeout=None): ''' Gather the return data from the event system, break hard when timeout is reached. 
''' log.trace('entered - function get_event_iter_returns()') if timeout is None: timeout = self.opts['timeout'] timeout_at = time.time() + timeout found = set() # Check to see if the jid is real, if not return the empty dict if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() # Wait for the hosts to check in while True: raw = self.event.get_event(timeout, auto_reconnect=self.auto_reconnect) if raw is None or time.time() > timeout_at: # Timeout reached break if 'minions' in raw.get('data', {}): continue try: found.add(raw['id']) ret = {raw['id']: {'ret': raw['return']}} except KeyError: # Ignore other erroneous messages continue if 'out' in raw: ret[raw['id']]['out'] = raw['out'] yield ret time.sleep(0.02) def _prep_pub(self, tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs): ''' Set up the payload_kwargs to be sent down to the master ''' if tgt_type == 'nodegroup': if tgt not in self.opts['nodegroups']: conf_file = self.opts.get( 'conf_file', 'the master config file' ) raise SaltInvocationError( 'Node group {0} unavailable in {1}'.format( tgt, conf_file ) ) tgt = salt.utils.minions.nodegroup_comp(tgt, self.opts['nodegroups']) tgt_type = 'compound' # Convert a range expression to a list of nodes and change expression # form to list if tgt_type == 'range' and HAS_RANGE: tgt = self._convert_range_to_list(tgt) tgt_type = 'list' # If an external job cache is specified add it to the ret list if self.opts.get('ext_job_cache'): if ret: ret += ',{0}'.format(self.opts['ext_job_cache']) else: ret = self.opts['ext_job_cache'] # format the payload - make a function that does this in the payload # module # Generate the standard keyword args to feed to format_payload payload_kwargs = {'cmd': 'publish', 'tgt': tgt, 'fun': fun, 'arg': arg, 'key': self.key, 'tgt_type': tgt_type, 'ret': ret, 'jid': jid} # if kwargs are passed, pack them. 
if kwargs: payload_kwargs['kwargs'] = kwargs # If we have a salt user, add it to the payload if self.opts['syndic_master'] and 'user' in kwargs: payload_kwargs['user'] = kwargs['user'] elif self.salt_user: payload_kwargs['user'] = self.salt_user # If we're a syndication master, pass the timeout if self.opts['order_masters']: payload_kwargs['to'] = timeout return payload_kwargs def pub(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, listen=False, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.ReqChannel.factory(self.opts, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. 
# If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError as err: log.error(err) raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: return payload self.key = key payload_kwargs['key'] = self.key payload = channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: return payload # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() return {'jid': payload['load']['jid'], 'minions': payload['load']['minions']} @tornado.gen.coroutine def pub_async(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, io_loop=None, listen=True, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. 
fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.AsyncReqChannel.factory(self.opts, io_loop=io_loop, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. # If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = yield channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError: raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! 
Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: raise tornado.gen.Return(payload) self.key = key payload_kwargs['key'] = self.key payload = yield channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: raise tornado.gen.Return(payload) # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() raise tornado.gen.Return({'jid': payload['load']['jid'], 'minions': payload['load']['minions']}) def __del__(self): # This IS really necessary! # When running tests, if self.events is not destroyed, we leak 2 # threads per test case which uses self.client if hasattr(self, 'event'): # The call below will take care of calling 'self.event.destroy()' del self.event def _clean_up_subscriptions(self, job_id): if self.opts.get('order_masters'): self.event.unsubscribe('syndic/.*/{0}'.format(job_id), 'regex') self.event.unsubscribe('salt/job/{0}'.format(job_id))
saltstack/salt
salt/client/__init__.py
LocalClient.get_cache_returns
python
def get_cache_returns(self, jid): ''' Execute a single pass to gather the contents of the job cache ''' ret = {} try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Could not examine master job cache. ' 'Error occurred in {0} returner. ' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data return ret
Execute a single pass to gather the contents of the job cache
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L1405-L1431
null
class LocalClient(object): ''' The interface used by the :command:`salt` CLI tool on the Salt Master ``LocalClient`` is used to send a command to Salt minions to execute :ref:`execution modules <all-salt.modules>` and return the results to the Salt Master. Importing and using ``LocalClient`` must be done on the same machine as the Salt Master and it must be done using the same user that the Salt Master is running as. (Unless :conf_master:`external_auth` is configured and authentication credentials are included in the execution). .. note:: The LocalClient uses a Tornado IOLoop, this can create issues when using the LocalClient inside an existing IOLoop. If creating the LocalClient in partnership with another IOLoop either create the IOLoop before creating the LocalClient, or when creating the IOLoop use ioloop.current() which will return the ioloop created by LocalClient. .. code-block:: python import salt.client local = salt.client.LocalClient() local.cmd('*', 'test.fib', [10]) ''' def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, 'master'), mopts=None, skip_perm_errors=False, io_loop=None, keep_loop=False, auto_reconnect=False): ''' :param IOLoop io_loop: io_loop used for events. Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. 
''' if mopts: self.opts = mopts else: if os.path.isdir(c_path): log.warning( '%s expects a file path not a directory path(%s) to ' 'its \'c_path\' keyword argument', self.__class__.__name__, c_path ) self.opts = salt.config.client_config(c_path) self.serial = salt.payload.Serial(self.opts) self.salt_user = salt.utils.user.get_specific_user() self.skip_perm_errors = skip_perm_errors self.key = self.__read_master_key() self.auto_reconnect = auto_reconnect self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False, io_loop=io_loop, keep_loop=keep_loop) self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils) self.returners = salt.loader.returners(self.opts, self.functions) def __read_master_key(self): ''' Read in the rotating master authentication key ''' key_user = self.salt_user if key_user == 'root': if self.opts.get('user', 'root') != 'root': key_user = self.opts.get('user', 'root') if key_user.startswith('sudo_'): key_user = self.opts.get('user', 'root') if salt.utils.platform.is_windows(): # The username may contain '\' if it is in Windows # 'DOMAIN\username' format. Fix this for the keyfile path. 
key_user = key_user.replace('\\', '_') keyfile = os.path.join(self.opts['cachedir'], '.{0}_key'.format(key_user)) try: # Make sure all key parent directories are accessible salt.utils.verify.check_path_traversal(self.opts['cachedir'], key_user, self.skip_perm_errors) with salt.utils.files.fopen(keyfile, 'r') as key: return salt.utils.stringutils.to_unicode(key.read()) except (OSError, IOError, SaltClientError): # Fall back to eauth return '' def _convert_range_to_list(self, tgt): ''' convert a seco.range range into a list target ''' range_ = seco.range.Range(self.opts['range_server']) try: return range_.expand(tgt) except seco.range.RangeException as err: print('Range server exception: {0}'.format(err)) return [] def _get_timeout(self, timeout): ''' Return the timeout to use ''' if timeout is None: return self.opts['timeout'] if isinstance(timeout, int): return timeout if isinstance(timeout, six.string_types): try: return int(timeout) except ValueError: return self.opts['timeout'] # Looks like the timeout is invalid, use config return self.opts['timeout'] def gather_job_info(self, jid, tgt, tgt_type, listen=True, **kwargs): ''' Return the information about a given job ''' log.debug('Checking whether jid %s is still running', jid) timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) pub_data = self.run_job(tgt, 'saltutil.find_job', arg=[jid], tgt_type=tgt_type, timeout=timeout, listen=listen, **kwargs ) if 'jid' in pub_data: self.event.subscribe(pub_data['jid']) return pub_data def _check_pub_data(self, pub_data, listen=True): ''' Common checks on the pub_data data structure returned from running pub ''' if pub_data == '': # Failed to authenticate, this could be a bunch of things raise EauthAuthenticationError( 'Failed to authenticate! This is most likely because this ' 'user is not permitted to execute commands, but there is a ' 'small possibility that a disk error occurred (check ' 'disk/inode usage).' 
) # Failed to connect to the master and send the pub if 'error' in pub_data: print(pub_data['error']) log.debug('_check_pub_data() error: %s', pub_data['error']) return {} elif 'jid' not in pub_data: return {} if pub_data['jid'] == '0': print('Failed to connect to the Master, ' 'is the Salt Master running?') return {} # If we order masters (via a syndic), don't short circuit if no minions # are found if not self.opts.get('order_masters'): # Check for no minions if not pub_data['minions']: print('No minions matched the target. ' 'No command was sent, no jid was assigned.') return {} # don't install event subscription listeners when the request is asynchronous # and doesn't care. this is important as it will create event leaks otherwise if not listen: return pub_data if self.opts.get('order_masters'): self.event.subscribe('syndic/.*/{0}'.format(pub_data['jid']), 'regex') self.event.subscribe('salt/job/{0}'.format(pub_data['jid'])) return pub_data def run_job( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=False, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = self.pub( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) return self._check_pub_data(pub_data, listen=listen) def gather_minions(self, tgt, expr_form): _res = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form) return _res['minions'] @tornado.gen.coroutine def run_job_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=True, io_loop=None, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job_async('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = yield self.pub_async( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), io_loop=io_loop, listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) raise tornado.gen.Return(self._check_pub_data(pub_data, listen=listen)) def cmd_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', kwarg=None, **kwargs): ''' Asynchronously send a command to connected minions The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A job ID or 0 on failure. .. code-block:: python >>> local.cmd_async('*', 'test.sleep', [300]) '20131219215921857715' ''' pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, jid=jid, kwarg=kwarg, listen=False, **kwargs) try: return pub_data['jid'] except KeyError: return 0 def cmd_subset( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, sub=3, cli=False, progress=False, full_return=False, **kwargs): ''' Execute a command on a random subset of the targeted systems The function signature is the same as :py:meth:`cmd` with the following exceptions. :param sub: The number of systems to execute on :param cli: When this is set to True, a generator is returned, otherwise a dictionary of the minion returns is returned .. 
code-block:: python >>> SLC.cmd_subset('*', 'test.ping', sub=1) {'jerry': True} ''' minion_ret = self.cmd(tgt, 'sys.list_functions', tgt_type=tgt_type, **kwargs) minions = list(minion_ret) random.shuffle(minions) f_tgt = [] for minion in minions: if fun in minion_ret[minion]: f_tgt.append(minion) if len(f_tgt) >= sub: break func = self.cmd if cli: func = self.cmd_cli return func( f_tgt, fun, arg, tgt_type='list', ret=ret, kwarg=kwarg, progress=progress, full_return=full_return, **kwargs) def cmd_batch( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, batch='10%', **kwargs): ''' Iteratively execute a command on subsets of minions at a time The function signature is the same as :py:meth:`cmd` with the following exceptions. :param batch: The batch identifier of systems to execute on :returns: A generator of minion returns .. code-block:: python >>> returns = local.cmd_batch('*', 'state.highstate', batch='10%') >>> for ret in returns: ... print(ret) {'jerry': {...}} {'dave': {...}} {'stewart': {...}} ''' # Late import - not used anywhere else in this file import salt.cli.batch opts = salt.cli.batch.batch_get_opts( tgt, fun, batch, self.opts, arg=arg, tgt_type=tgt_type, ret=ret, kwarg=kwarg, **kwargs) eauth = salt.cli.batch.batch_get_eauth(kwargs) arg = salt.utils.args.parse_input(arg, kwargs=kwarg) opts = {'tgt': tgt, 'fun': fun, 'arg': arg, 'tgt_type': tgt_type, 'ret': ret, 'batch': batch, 'failhard': kwargs.get('failhard', False), 'raw': kwargs.get('raw', False)} if 'timeout' in kwargs: opts['timeout'] = kwargs['timeout'] if 'gather_job_timeout' in kwargs: opts['gather_job_timeout'] = kwargs['gather_job_timeout'] if 'batch_wait' in kwargs: opts['batch_wait'] = int(kwargs['batch_wait']) eauth = {} if 'eauth' in kwargs: eauth['eauth'] = kwargs.pop('eauth') if 'username' in kwargs: eauth['username'] = kwargs.pop('username') if 'password' in kwargs: eauth['password'] = kwargs.pop('password') if 'token' in kwargs: eauth['token'] = kwargs.pop('token') for key, 
val in six.iteritems(self.opts): if key not in opts: opts[key] = val batch = salt.cli.batch.Batch(opts, eauth=eauth, quiet=True) for ret in batch.run(): yield ret def cmd(self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', jid='', full_return=False, kwarg=None, **kwargs): ''' Synchronously execute a command on targeted minions The cmd method will execute and wait for the timeout period for all minions to reply, then it will return all minion data at once. .. code-block:: python >>> import salt.client >>> local = salt.client.LocalClient() >>> local.cmd('*', 'cmd.run', ['whoami']) {'jerry': 'root'} With extra keyword arguments for the command function to be run: .. code-block:: python local.cmd('*', 'test.arg', ['arg1', 'arg2'], kwarg={'foo': 'bar'}) Compound commands can be used for multiple executions in a single publish. Function names and function arguments are provided in separate lists but the index values must correlate and an empty list must be used if no arguments are required. .. code-block:: python >>> local.cmd('*', [ 'grains.items', 'sys.doc', 'cmd.run', ], [ [], [], ['uptime'], ]) :param tgt: Which minions to target for the execution. Default is shell glob. Modified by the ``tgt_type`` option. :type tgt: string or list :param fun: The module and function to call on the specified minions of the form ``module.function``. For example ``test.ping`` or ``grains.items``. Compound commands Multiple functions may be called in a single publish by passing a list of commands. This can dramatically lower overhead and speed up the application communicating with Salt. This requires that the ``arg`` param is a list of lists. The ``fun`` list and the ``arg`` list must correlate by index meaning a function that does not take arguments must still have a corresponding empty list at the expected index. :type fun: string or list of strings :param arg: A list of arguments to pass to the remote function. 
If the function takes no arguments ``arg`` may be omitted except when executing a compound command. :type arg: list or list-of-lists :param timeout: Seconds to wait after the last minion returns but before all minions return. :param tgt_type: The type of ``tgt``. Allowed values: * ``glob`` - Bash glob completion - Default * ``pcre`` - Perl style regular expression * ``list`` - Python list of hosts * ``grain`` - Match based on a grain comparison * ``grain_pcre`` - Grain comparison with a regex * ``pillar`` - Pillar data comparison * ``pillar_pcre`` - Pillar data comparison with a regex * ``nodegroup`` - Match on nodegroup * ``range`` - Use a Range server for matching * ``compound`` - Pass a compound match string * ``ipcidr`` - Match based on Subnet (CIDR notation) or IPv4 address. .. versionchanged:: 2017.7.0 Renamed from ``expr_form`` to ``tgt_type`` :param ret: The returner to use. The value passed can be single returner, or a comma delimited list of returners to call in order on the minions :param kwarg: A dictionary with keyword arguments for the function. :param full_return: Output the job return only (default) or the full return including exit code and other job metadata. :param kwargs: Optional keyword arguments. Authentication credentials may be passed when using :conf_master:`external_auth`. For example: ``local.cmd('*', 'test.ping', username='saltdev', password='saltdev', eauth='pam')``. Or: ``local.cmd('*', 'test.ping', token='5871821ea51754fdcea8153c1c745433')`` :returns: A dictionary with the result of the execution, keyed by minion ID. A compound command will return a sub-dictionary keyed by function name. 
''' was_listening = self.event.cpub try: pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, timeout, jid, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data ret = {} for fn_ret in self.get_cli_event_returns( pub_data['jid'], pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, **kwargs): if fn_ret: for mid, data in six.iteritems(fn_ret): ret[mid] = (data if full_return else data.get('ret', {})) for failed in list(set(pub_data['minions']) - set(ret)): ret[failed] = False return ret finally: if not was_listening: self.event.close_pub() def cmd_cli( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, progress=False, **kwargs): ''' Used by the :command:`salt` CLI. This method returns minion returns as they come back and attempts to block until all minions return. The function signature is the same as :py:meth:`cmd` with the following exceptions. :param verbose: Print extra information about the running command :returns: A generator ''' was_listening = self.event.cpub if fun.startswith('state.'): ref = {'compound': '-C', 'glob': '', 'grain': '-G', 'grain_pcre': '-P', 'ipcidr': '-S', 'list': '-L', 'nodegroup': '-N', 'pcre': '-E', 'pillar': '-I', 'pillar_pcre': '-J'} if HAS_RANGE: ref['range'] = '-R' if ref[tgt_type].startswith('-'): self.target_data = "{0} '{1}'".format( ref[tgt_type], ','.join(tgt) if isinstance(tgt, list) else tgt) else: self.target_data = ','.join(tgt) if isinstance(tgt, list) else tgt else: self.target_data = '' try: self.pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not self.pub_data: yield self.pub_data else: try: for fn_ret in self.get_cli_event_returns( self.pub_data['jid'], self.pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, verbose, progress, **kwargs): if not fn_ret: continue yield fn_ret except KeyboardInterrupt: exit_msg = ( '\nExiting gracefully on Ctrl-c' '\n' 'This job\'s jid is: {0}\n' 'The 
minions may not have all finished running and any ' 'remaining minions will return upon completion.\n\n' 'To look up the return data for this job later, run the ' 'following command:\n' 'salt-run jobs.lookup_jid {0}'.format(self.pub_data['jid'])) if self.target_data: exit_msg += ( '\n\n' 'To set up the state run to safely exit, run the following command:\n' 'salt {0} state.soft_kill {1}'.format(self.target_data, self.pub_data['jid'])) raise SystemExit(exit_msg) finally: if not was_listening: self.event.close_pub() def cmd_iter( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, **kwargs): ''' Yields the individual minion returns as they come in The function signature is the same as :py:meth:`cmd` with the following exceptions. Normally :py:meth:`cmd_iter` does not yield results for minions that are not connected. If you want it to return results for disconnected minions set `expect_minions=True` in `kwargs`. :return: A generator yielding the individual minion returns .. code-block:: python >>> ret = local.cmd_iter('*', 'test.ping') >>> for i in ret: ... print(i) {'jerry': {'ret': True}} {'dave': {'ret': True}} {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: if kwargs.get('yield_pub_data'): yield pub_data for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=self._get_timeout(timeout), tgt=tgt, tgt_type=tgt_type, **kwargs): if not fn_ret: continue yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_iter_no_block( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, show_jid=False, verbose=False, **kwargs): ''' Yields the individual minion returns as they come in, or None when no returns are available. 
The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A generator yielding the individual minion returns, or None when no returns are available. This allows for actions to be injected in between minion returns. .. code-block:: python >>> ret = local.cmd_iter_no_block('*', 'test.ping') >>> for i in ret: ... print(i) None {'jerry': {'ret': True}} {'dave': {'ret': True}} None {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=timeout, tgt=tgt, tgt_type=tgt_type, block=False, **kwargs): if fn_ret and any([show_jid, verbose]): for minion in fn_ret: fn_ret[minion]['jid'] = pub_data['jid'] yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_full_return( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, **kwargs): ''' Execute a salt command and return ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data return (self.get_cli_static_event_returns(pub_data['jid'], pub_data['minions'], timeout, tgt, tgt_type, verbose)) finally: if not was_listening: self.event.close_pub() def get_cli_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_jid=False, **kwargs): ''' Starts a watcher looking at the return data for a specified JID :returns: all of the information for the JID ''' if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] fret = {} # make sure the minions is a set (since we do set operations on it) 
minions = set(minions) found = set() # start this before the cache lookup-- in case new stuff comes in event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) # get the info from the cache ret = self.get_cache_returns(jid) if ret != {}: found.update(set(ret)) yield ret # if you have all the returns, stop if len(found.intersection(minions)) >= len(minions): raise StopIteration() # otherwise, get them from the event system for event in event_iter: if event != {}: found.update(set(event)) yield event if len(found.intersection(minions)) >= len(minions): self._clean_up_subscriptions(jid) raise StopIteration() # TODO: tests!! def get_returns_no_block( self, tag, match_type=None): ''' Raw function to just return events of jid excluding timeout logic Yield either the raw event data or None Pass a list of additional regular expressions as `tags_regex` to search the event bus for non-return data, such as minion lists returned from syndics. ''' while True: raw = self.event.get_event(wait=0.01, tag=tag, match_type=match_type, full=True, no_block=True, auto_reconnect=self.auto_reconnect) yield raw def get_iter_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', expect_minions=False, block=True, **kwargs): ''' Watch the event system and return job data as it comes in :returns: all of the information for the JID ''' if not isinstance(minions, set): if isinstance(minions, six.string_types): minions = set([minions]) elif isinstance(minions, (list, tuple)): minions = set(list(minions)) if timeout is None: timeout = self.opts['timeout'] gather_job_timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) start = int(time.time()) # timeouts per minion, id_ -> timeout time minion_timeouts = {} found = set() missing = set() # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the 
iteration, since the jid is invalid raise StopIteration() except Exception as exc: log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG) # Wait for the hosts to check in last_time = False # iterator for this job's return if self.opts['order_masters']: # If we are a MoM, we need to gather expected minions from downstreams masters. ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex') else: ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid)) # iterator for the info of this job jinfo_iter = [] # open event jids that need to be un-subscribed from later open_jids = set() timeout_at = time.time() + timeout gather_syndic_wait = time.time() + self.opts['syndic_wait'] # are there still minions running the job out there # start as True so that we ping at least once minions_running = True log.debug( 'get_iter_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) while True: # Process events until timeout is reached or all minions have returned for raw in ret_iter: # if we got None, then there were no events if raw is None: break if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) if 'missing' in raw.get('data', {}): missing.update(raw['data']['missing']) continue if 'return' not in raw['data']: continue if kwargs.get('raw', False): found.add(raw['data']['id']) yield raw else: found.add(raw['data']['id']) ret = {raw['data']['id']: {'ret': raw['data']['return']}} if 'out' in raw['data']: ret[raw['data']['id']]['out'] = raw['data']['out'] if 'retcode' in raw['data']: ret[raw['data']['id']]['retcode'] = raw['data']['retcode'] if 'jid' in raw['data']: ret[raw['data']['id']]['jid'] = raw['data']['jid'] if kwargs.get('_cmd_meta', False): ret[raw['data']['id']].update(raw['data']) log.debug('jid %s return from %s', jid, raw['data']['id']) yield ret # if we have all of the returns (and we aren't a syndic), no need for anything fancy if 
len(found.intersection(minions)) >= len(minions) and not self.opts['order_masters']: # All minions have returned, break out of the loop log.debug('jid %s found all minions %s', jid, found) break elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']: if len(found) >= len(minions) and len(minions) > 0 and time.time() > gather_syndic_wait: # There were some minions to find and we found them # However, this does not imply that *all* masters have yet responded with expected minion lists. # Therefore, continue to wait up to the syndic_wait period (calculated in gather_syndic_wait) to see # if additional lower-level masters deliver their lists of expected # minions. break # If we get here we may not have gathered the minion list yet. Keep waiting # for all lower-level masters to respond with their minion lists # let start the timeouts for all remaining minions for id_ in minions - found: # if we have a new minion in the list, make sure it has a timeout if id_ not in minion_timeouts: minion_timeouts[id_] = time.time() + timeout # if the jinfo has timed out and some minions are still running the job # re-do the ping if time.time() > timeout_at and minions_running: # since this is a new ping, no one has responded yet jinfo = self.gather_job_info(jid, list(minions - found), 'list', **kwargs) minions_running = False # if we weren't assigned any jid that means the master thinks # we have nothing to send if 'jid' not in jinfo: jinfo_iter = [] else: jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid'])) timeout_at = time.time() + gather_job_timeout # if you are a syndic, wait a little longer if self.opts['order_masters']: timeout_at += self.opts.get('syndic_wait', 1) # check for minions that are running the job still for raw in jinfo_iter: # if there are no more events, lets stop waiting for the jinfo if raw is None: break try: if raw['data']['retcode'] > 0: log.error('saltutil returning errors on minion %s', raw['data']['id']) 
minions.remove(raw['data']['id']) break except KeyError as exc: # This is a safe pass. We're just using the try/except to # avoid having to deep-check for keys. missing_key = exc.__str__().strip('\'"') if missing_key == 'retcode': log.debug('retcode missing from client return') else: log.debug( 'Passing on saltutil error. Key \'%s\' missing ' 'from client return. This may be an error in ' 'the client.', missing_key ) # Keep track of the jid events to unsubscribe from later open_jids.add(jinfo['jid']) # TODO: move to a library?? if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue if 'syndic' in raw.get('data', {}): minions.update(raw['syndic']) continue if 'return' not in raw.get('data', {}): continue # if the job isn't running there anymore... don't count if raw['data']['return'] == {}: continue # if the minion throws an exception containing the word "return" # the master will try to handle the string as a dict in the next # step. Check if we have a string, log the issue and continue. 
if isinstance(raw['data']['return'], six.string_types): log.error("unexpected return from minion: %s", raw) continue if 'return' in raw['data']['return'] and \ raw['data']['return']['return'] == {}: continue # if we didn't originally target the minion, lets add it to the list if raw['data']['id'] not in minions: minions.add(raw['data']['id']) # update this minion's timeout, as long as the job is still running minion_timeouts[raw['data']['id']] = time.time() + timeout # a minion returned, so we know its running somewhere minions_running = True # if we have hit gather_job_timeout (after firing the job) AND # if we have hit all minion timeouts, lets call it now = time.time() # if we have finished waiting, and no minions are running the job # then we need to see if each minion has timedout done = (now > timeout_at) and not minions_running if done: # if all minions have timeod out for id_ in minions - found: if now < minion_timeouts[id_]: done = False break if done: break # don't spin if block: time.sleep(0.01) else: yield # If there are any remaining open events, clean them up. if open_jids: for jid in open_jids: self.event.unsubscribe(jid) if expect_minions: for minion in list((minions - found)): yield {minion: {'failed': True}} # Filter out any minions marked as missing for which we received # returns (prevents false events sent due to higher-level masters not # knowing about lower-level minions). 
missing -= found # Report on missing minions if missing: for minion in missing: yield {minion: {'failed': True}} def get_returns( self, jid, minions, timeout=None): ''' Get the returns for the command line interface via the event system ''' minions = set(minions) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout log.debug( 'get_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Master job cache returner [{0}] failed to verify jid. ' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: time_left = timeout_at - int(time.time()) wait = max(1, time_left) raw = self.event.get_event(wait, jid, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: found.add(raw['id']) ret[raw['id']] = raw['return'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break if int(time.time()) > timeout_at: log.info( 'jid %s minions %s did not return in time', jid, (minions - found) ) break time.sleep(0.01) return ret def get_full_returns(self, jid, minions, timeout=None): ''' This method starts off a watcher looking at the return data for a specified jid, it returns all of the information for the jid ''' # TODO: change this from ret to return... or the other way. 
# Its inconsistent, we should pick one ret = {} # create the iterator-- since we want to get anyone in the middle event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Returner {0} could not fetch jid data. ' 'Exception details: {1}'.format( self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # if we have all the minion returns, lets just return if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise lets use the listener we created above to get the rest for event_ret in event_iter: # if nothing in the event_ret, skip if event_ret == {}: time.sleep(0.02) continue for minion, m_data in six.iteritems(event_ret): if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # are we done yet? 
if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise we hit the timeout, return what we have return ret def get_cli_static_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_timeout=False, show_jid=False): ''' Get the returns for the command line interface via the event system ''' log.trace('entered - function get_cli_static_event_returns()') minions = set(minions) if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Load could not be retrieved from ' 'returner {0}. 
Exception details: {1}'.format( self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: # Process events until timeout is reached or all minions have returned time_left = timeout_at - int(time.time()) # Wait 0 == forever, use a minimum of 1s wait = max(1, time_left) jid_tag = 'salt/job/{0}'.format(jid) raw = self.event.get_event(wait, jid_tag, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue found.add(raw['id']) ret[raw['id']] = {'ret': raw['return']} ret[raw['id']]['success'] = raw.get('success', False) if 'out' in raw: ret[raw['id']]['out'] = raw['out'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break if int(time.time()) > timeout_at: if verbose or show_timeout: if self.opts.get('minion_data_cache', False) \ or tgt_type in ('glob', 'pcre', 'list'): if len(found) < len(minions): fail = sorted(list(minions.difference(found))) for minion in fail: ret[minion] = { 'out': 'no_return', 'ret': 'Minion did not return' } break time.sleep(0.01) self._clean_up_subscriptions(jid) return ret def get_cli_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, progress=False, show_timeout=False, show_jid=False, **kwargs): ''' Get the returns for the command line interface via the event system ''' log.trace('func get_cli_event_returns()') if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) # lazy load the connected minions connected_minions = None return_count = 0 for ret in self.get_iter_returns(jid, minions, timeout=timeout, tgt=tgt, tgt_type=tgt_type, # (gtmanfred) 
expect_minions is popped here incase it is passed from a client # call. If this is not popped, then it would be passed twice to # get_iter_returns. expect_minions=(kwargs.pop('expect_minions', False) or verbose or show_timeout), **kwargs ): log.debug('return event: %s', ret) return_count = return_count + 1 if progress: for id_, min_ret in six.iteritems(ret): if not min_ret.get('failed') is True: yield {'minion_count': len(minions), 'return_count': return_count} # replace the return structure for missing minions for id_, min_ret in six.iteritems(ret): if min_ret.get('failed') is True: if connected_minions is None: connected_minions = salt.utils.minions.CkMinions(self.opts).connected_ids() if self.opts['minion_data_cache'] \ and salt.cache.factory(self.opts).contains('minions/{0}'.format(id_), 'data') \ and connected_minions \ and id_ not in connected_minions: yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [Not connected]', 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: # don't report syndics as unresponsive minions if not os.path.exists(os.path.join(self.opts['syndic_dir'], id_)): yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [No response]' '\nThe minions may not have all finished running and any ' 'remaining minions will return upon completion. To look ' 'up the return data for this job later, run the following ' 'command:\n\n' 'salt-run jobs.lookup_jid {0}'.format(jid), 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: yield {id_: min_ret} self._clean_up_subscriptions(jid) def get_event_iter_returns(self, jid, minions, timeout=None): ''' Gather the return data from the event system, break hard when timeout is reached. 
''' log.trace('entered - function get_event_iter_returns()') if timeout is None: timeout = self.opts['timeout'] timeout_at = time.time() + timeout found = set() # Check to see if the jid is real, if not return the empty dict if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() # Wait for the hosts to check in while True: raw = self.event.get_event(timeout, auto_reconnect=self.auto_reconnect) if raw is None or time.time() > timeout_at: # Timeout reached break if 'minions' in raw.get('data', {}): continue try: found.add(raw['id']) ret = {raw['id']: {'ret': raw['return']}} except KeyError: # Ignore other erroneous messages continue if 'out' in raw: ret[raw['id']]['out'] = raw['out'] yield ret time.sleep(0.02) def _prep_pub(self, tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs): ''' Set up the payload_kwargs to be sent down to the master ''' if tgt_type == 'nodegroup': if tgt not in self.opts['nodegroups']: conf_file = self.opts.get( 'conf_file', 'the master config file' ) raise SaltInvocationError( 'Node group {0} unavailable in {1}'.format( tgt, conf_file ) ) tgt = salt.utils.minions.nodegroup_comp(tgt, self.opts['nodegroups']) tgt_type = 'compound' # Convert a range expression to a list of nodes and change expression # form to list if tgt_type == 'range' and HAS_RANGE: tgt = self._convert_range_to_list(tgt) tgt_type = 'list' # If an external job cache is specified add it to the ret list if self.opts.get('ext_job_cache'): if ret: ret += ',{0}'.format(self.opts['ext_job_cache']) else: ret = self.opts['ext_job_cache'] # format the payload - make a function that does this in the payload # module # Generate the standard keyword args to feed to format_payload payload_kwargs = {'cmd': 'publish', 'tgt': tgt, 'fun': fun, 'arg': arg, 'key': self.key, 'tgt_type': tgt_type, 'ret': ret, 'jid': jid} # if kwargs are passed, pack them. 
if kwargs: payload_kwargs['kwargs'] = kwargs # If we have a salt user, add it to the payload if self.opts['syndic_master'] and 'user' in kwargs: payload_kwargs['user'] = kwargs['user'] elif self.salt_user: payload_kwargs['user'] = self.salt_user # If we're a syndication master, pass the timeout if self.opts['order_masters']: payload_kwargs['to'] = timeout return payload_kwargs def pub(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, listen=False, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.ReqChannel.factory(self.opts, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. 
# If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError as err: log.error(err) raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: return payload self.key = key payload_kwargs['key'] = self.key payload = channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: return payload # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() return {'jid': payload['load']['jid'], 'minions': payload['load']['minions']} @tornado.gen.coroutine def pub_async(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, io_loop=None, listen=True, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. 
fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.AsyncReqChannel.factory(self.opts, io_loop=io_loop, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. # If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = yield channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError: raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! 
Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: raise tornado.gen.Return(payload) self.key = key payload_kwargs['key'] = self.key payload = yield channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: raise tornado.gen.Return(payload) # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() raise tornado.gen.Return({'jid': payload['load']['jid'], 'minions': payload['load']['minions']}) def __del__(self): # This IS really necessary! # When running tests, if self.events is not destroyed, we leak 2 # threads per test case which uses self.client if hasattr(self, 'event'): # The call below will take care of calling 'self.event.destroy()' del self.event def _clean_up_subscriptions(self, job_id): if self.opts.get('order_masters'): self.event.unsubscribe('syndic/.*/{0}'.format(job_id), 'regex') self.event.unsubscribe('salt/job/{0}'.format(job_id))
saltstack/salt
salt/client/__init__.py
LocalClient.get_cli_static_event_returns
python
def get_cli_static_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_timeout=False, show_jid=False): ''' Get the returns for the command line interface via the event system ''' log.trace('entered - function get_cli_static_event_returns()') minions = set(minions) if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Load could not be retrieved from ' 'returner {0}. Exception details: {1}'.format( self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: # Process events until timeout is reached or all minions have returned time_left = timeout_at - int(time.time()) # Wait 0 == forever, use a minimum of 1s wait = max(1, time_left) jid_tag = 'salt/job/{0}'.format(jid) raw = self.event.get_event(wait, jid_tag, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue found.add(raw['id']) ret[raw['id']] = {'ret': raw['return']} ret[raw['id']]['success'] = raw.get('success', False) if 'out' in raw: ret[raw['id']]['out'] = raw['out'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break if int(time.time()) > timeout_at: if verbose or show_timeout: if self.opts.get('minion_data_cache', False) \ or tgt_type in 
('glob', 'pcre', 'list'): if len(found) < len(minions): fail = sorted(list(minions.difference(found))) for minion in fail: ret[minion] = { 'out': 'no_return', 'ret': 'Minion did not return' } break time.sleep(0.01) self._clean_up_subscriptions(jid) return ret
Get the returns for the command line interface via the event system
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L1433-L1512
null
class LocalClient(object): ''' The interface used by the :command:`salt` CLI tool on the Salt Master ``LocalClient`` is used to send a command to Salt minions to execute :ref:`execution modules <all-salt.modules>` and return the results to the Salt Master. Importing and using ``LocalClient`` must be done on the same machine as the Salt Master and it must be done using the same user that the Salt Master is running as. (Unless :conf_master:`external_auth` is configured and authentication credentials are included in the execution). .. note:: The LocalClient uses a Tornado IOLoop, this can create issues when using the LocalClient inside an existing IOLoop. If creating the LocalClient in partnership with another IOLoop either create the IOLoop before creating the LocalClient, or when creating the IOLoop use ioloop.current() which will return the ioloop created by LocalClient. .. code-block:: python import salt.client local = salt.client.LocalClient() local.cmd('*', 'test.fib', [10]) ''' def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, 'master'), mopts=None, skip_perm_errors=False, io_loop=None, keep_loop=False, auto_reconnect=False): ''' :param IOLoop io_loop: io_loop used for events. Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. 
''' if mopts: self.opts = mopts else: if os.path.isdir(c_path): log.warning( '%s expects a file path not a directory path(%s) to ' 'its \'c_path\' keyword argument', self.__class__.__name__, c_path ) self.opts = salt.config.client_config(c_path) self.serial = salt.payload.Serial(self.opts) self.salt_user = salt.utils.user.get_specific_user() self.skip_perm_errors = skip_perm_errors self.key = self.__read_master_key() self.auto_reconnect = auto_reconnect self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False, io_loop=io_loop, keep_loop=keep_loop) self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils) self.returners = salt.loader.returners(self.opts, self.functions) def __read_master_key(self): ''' Read in the rotating master authentication key ''' key_user = self.salt_user if key_user == 'root': if self.opts.get('user', 'root') != 'root': key_user = self.opts.get('user', 'root') if key_user.startswith('sudo_'): key_user = self.opts.get('user', 'root') if salt.utils.platform.is_windows(): # The username may contain '\' if it is in Windows # 'DOMAIN\username' format. Fix this for the keyfile path. 
key_user = key_user.replace('\\', '_') keyfile = os.path.join(self.opts['cachedir'], '.{0}_key'.format(key_user)) try: # Make sure all key parent directories are accessible salt.utils.verify.check_path_traversal(self.opts['cachedir'], key_user, self.skip_perm_errors) with salt.utils.files.fopen(keyfile, 'r') as key: return salt.utils.stringutils.to_unicode(key.read()) except (OSError, IOError, SaltClientError): # Fall back to eauth return '' def _convert_range_to_list(self, tgt): ''' convert a seco.range range into a list target ''' range_ = seco.range.Range(self.opts['range_server']) try: return range_.expand(tgt) except seco.range.RangeException as err: print('Range server exception: {0}'.format(err)) return [] def _get_timeout(self, timeout): ''' Return the timeout to use ''' if timeout is None: return self.opts['timeout'] if isinstance(timeout, int): return timeout if isinstance(timeout, six.string_types): try: return int(timeout) except ValueError: return self.opts['timeout'] # Looks like the timeout is invalid, use config return self.opts['timeout'] def gather_job_info(self, jid, tgt, tgt_type, listen=True, **kwargs): ''' Return the information about a given job ''' log.debug('Checking whether jid %s is still running', jid) timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) pub_data = self.run_job(tgt, 'saltutil.find_job', arg=[jid], tgt_type=tgt_type, timeout=timeout, listen=listen, **kwargs ) if 'jid' in pub_data: self.event.subscribe(pub_data['jid']) return pub_data def _check_pub_data(self, pub_data, listen=True): ''' Common checks on the pub_data data structure returned from running pub ''' if pub_data == '': # Failed to authenticate, this could be a bunch of things raise EauthAuthenticationError( 'Failed to authenticate! This is most likely because this ' 'user is not permitted to execute commands, but there is a ' 'small possibility that a disk error occurred (check ' 'disk/inode usage).' 
) # Failed to connect to the master and send the pub if 'error' in pub_data: print(pub_data['error']) log.debug('_check_pub_data() error: %s', pub_data['error']) return {} elif 'jid' not in pub_data: return {} if pub_data['jid'] == '0': print('Failed to connect to the Master, ' 'is the Salt Master running?') return {} # If we order masters (via a syndic), don't short circuit if no minions # are found if not self.opts.get('order_masters'): # Check for no minions if not pub_data['minions']: print('No minions matched the target. ' 'No command was sent, no jid was assigned.') return {} # don't install event subscription listeners when the request is asynchronous # and doesn't care. this is important as it will create event leaks otherwise if not listen: return pub_data if self.opts.get('order_masters'): self.event.subscribe('syndic/.*/{0}'.format(pub_data['jid']), 'regex') self.event.subscribe('salt/job/{0}'.format(pub_data['jid'])) return pub_data def run_job( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=False, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = self.pub( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) return self._check_pub_data(pub_data, listen=listen) def gather_minions(self, tgt, expr_form): _res = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form) return _res['minions'] @tornado.gen.coroutine def run_job_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=True, io_loop=None, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job_async('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = yield self.pub_async( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), io_loop=io_loop, listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) raise tornado.gen.Return(self._check_pub_data(pub_data, listen=listen)) def cmd_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', kwarg=None, **kwargs): ''' Asynchronously send a command to connected minions The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A job ID or 0 on failure. .. code-block:: python >>> local.cmd_async('*', 'test.sleep', [300]) '20131219215921857715' ''' pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, jid=jid, kwarg=kwarg, listen=False, **kwargs) try: return pub_data['jid'] except KeyError: return 0 def cmd_subset( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, sub=3, cli=False, progress=False, full_return=False, **kwargs): ''' Execute a command on a random subset of the targeted systems The function signature is the same as :py:meth:`cmd` with the following exceptions. :param sub: The number of systems to execute on :param cli: When this is set to True, a generator is returned, otherwise a dictionary of the minion returns is returned .. 
code-block:: python >>> SLC.cmd_subset('*', 'test.ping', sub=1) {'jerry': True} ''' minion_ret = self.cmd(tgt, 'sys.list_functions', tgt_type=tgt_type, **kwargs) minions = list(minion_ret) random.shuffle(minions) f_tgt = [] for minion in minions: if fun in minion_ret[minion]: f_tgt.append(minion) if len(f_tgt) >= sub: break func = self.cmd if cli: func = self.cmd_cli return func( f_tgt, fun, arg, tgt_type='list', ret=ret, kwarg=kwarg, progress=progress, full_return=full_return, **kwargs) def cmd_batch( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, batch='10%', **kwargs): ''' Iteratively execute a command on subsets of minions at a time The function signature is the same as :py:meth:`cmd` with the following exceptions. :param batch: The batch identifier of systems to execute on :returns: A generator of minion returns .. code-block:: python >>> returns = local.cmd_batch('*', 'state.highstate', batch='10%') >>> for ret in returns: ... print(ret) {'jerry': {...}} {'dave': {...}} {'stewart': {...}} ''' # Late import - not used anywhere else in this file import salt.cli.batch opts = salt.cli.batch.batch_get_opts( tgt, fun, batch, self.opts, arg=arg, tgt_type=tgt_type, ret=ret, kwarg=kwarg, **kwargs) eauth = salt.cli.batch.batch_get_eauth(kwargs) arg = salt.utils.args.parse_input(arg, kwargs=kwarg) opts = {'tgt': tgt, 'fun': fun, 'arg': arg, 'tgt_type': tgt_type, 'ret': ret, 'batch': batch, 'failhard': kwargs.get('failhard', False), 'raw': kwargs.get('raw', False)} if 'timeout' in kwargs: opts['timeout'] = kwargs['timeout'] if 'gather_job_timeout' in kwargs: opts['gather_job_timeout'] = kwargs['gather_job_timeout'] if 'batch_wait' in kwargs: opts['batch_wait'] = int(kwargs['batch_wait']) eauth = {} if 'eauth' in kwargs: eauth['eauth'] = kwargs.pop('eauth') if 'username' in kwargs: eauth['username'] = kwargs.pop('username') if 'password' in kwargs: eauth['password'] = kwargs.pop('password') if 'token' in kwargs: eauth['token'] = kwargs.pop('token') for key, 
val in six.iteritems(self.opts): if key not in opts: opts[key] = val batch = salt.cli.batch.Batch(opts, eauth=eauth, quiet=True) for ret in batch.run(): yield ret def cmd(self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', jid='', full_return=False, kwarg=None, **kwargs): ''' Synchronously execute a command on targeted minions The cmd method will execute and wait for the timeout period for all minions to reply, then it will return all minion data at once. .. code-block:: python >>> import salt.client >>> local = salt.client.LocalClient() >>> local.cmd('*', 'cmd.run', ['whoami']) {'jerry': 'root'} With extra keyword arguments for the command function to be run: .. code-block:: python local.cmd('*', 'test.arg', ['arg1', 'arg2'], kwarg={'foo': 'bar'}) Compound commands can be used for multiple executions in a single publish. Function names and function arguments are provided in separate lists but the index values must correlate and an empty list must be used if no arguments are required. .. code-block:: python >>> local.cmd('*', [ 'grains.items', 'sys.doc', 'cmd.run', ], [ [], [], ['uptime'], ]) :param tgt: Which minions to target for the execution. Default is shell glob. Modified by the ``tgt_type`` option. :type tgt: string or list :param fun: The module and function to call on the specified minions of the form ``module.function``. For example ``test.ping`` or ``grains.items``. Compound commands Multiple functions may be called in a single publish by passing a list of commands. This can dramatically lower overhead and speed up the application communicating with Salt. This requires that the ``arg`` param is a list of lists. The ``fun`` list and the ``arg`` list must correlate by index meaning a function that does not take arguments must still have a corresponding empty list at the expected index. :type fun: string or list of strings :param arg: A list of arguments to pass to the remote function. 
If the function takes no arguments ``arg`` may be omitted except when executing a compound command. :type arg: list or list-of-lists :param timeout: Seconds to wait after the last minion returns but before all minions return. :param tgt_type: The type of ``tgt``. Allowed values: * ``glob`` - Bash glob completion - Default * ``pcre`` - Perl style regular expression * ``list`` - Python list of hosts * ``grain`` - Match based on a grain comparison * ``grain_pcre`` - Grain comparison with a regex * ``pillar`` - Pillar data comparison * ``pillar_pcre`` - Pillar data comparison with a regex * ``nodegroup`` - Match on nodegroup * ``range`` - Use a Range server for matching * ``compound`` - Pass a compound match string * ``ipcidr`` - Match based on Subnet (CIDR notation) or IPv4 address. .. versionchanged:: 2017.7.0 Renamed from ``expr_form`` to ``tgt_type`` :param ret: The returner to use. The value passed can be single returner, or a comma delimited list of returners to call in order on the minions :param kwarg: A dictionary with keyword arguments for the function. :param full_return: Output the job return only (default) or the full return including exit code and other job metadata. :param kwargs: Optional keyword arguments. Authentication credentials may be passed when using :conf_master:`external_auth`. For example: ``local.cmd('*', 'test.ping', username='saltdev', password='saltdev', eauth='pam')``. Or: ``local.cmd('*', 'test.ping', token='5871821ea51754fdcea8153c1c745433')`` :returns: A dictionary with the result of the execution, keyed by minion ID. A compound command will return a sub-dictionary keyed by function name. 
''' was_listening = self.event.cpub try: pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, timeout, jid, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data ret = {} for fn_ret in self.get_cli_event_returns( pub_data['jid'], pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, **kwargs): if fn_ret: for mid, data in six.iteritems(fn_ret): ret[mid] = (data if full_return else data.get('ret', {})) for failed in list(set(pub_data['minions']) - set(ret)): ret[failed] = False return ret finally: if not was_listening: self.event.close_pub() def cmd_cli( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, progress=False, **kwargs): ''' Used by the :command:`salt` CLI. This method returns minion returns as they come back and attempts to block until all minions return. The function signature is the same as :py:meth:`cmd` with the following exceptions. :param verbose: Print extra information about the running command :returns: A generator ''' was_listening = self.event.cpub if fun.startswith('state.'): ref = {'compound': '-C', 'glob': '', 'grain': '-G', 'grain_pcre': '-P', 'ipcidr': '-S', 'list': '-L', 'nodegroup': '-N', 'pcre': '-E', 'pillar': '-I', 'pillar_pcre': '-J'} if HAS_RANGE: ref['range'] = '-R' if ref[tgt_type].startswith('-'): self.target_data = "{0} '{1}'".format( ref[tgt_type], ','.join(tgt) if isinstance(tgt, list) else tgt) else: self.target_data = ','.join(tgt) if isinstance(tgt, list) else tgt else: self.target_data = '' try: self.pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not self.pub_data: yield self.pub_data else: try: for fn_ret in self.get_cli_event_returns( self.pub_data['jid'], self.pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, verbose, progress, **kwargs): if not fn_ret: continue yield fn_ret except KeyboardInterrupt: exit_msg = ( '\nExiting gracefully on Ctrl-c' '\n' 'This job\'s jid is: {0}\n' 'The 
minions may not have all finished running and any ' 'remaining minions will return upon completion.\n\n' 'To look up the return data for this job later, run the ' 'following command:\n' 'salt-run jobs.lookup_jid {0}'.format(self.pub_data['jid'])) if self.target_data: exit_msg += ( '\n\n' 'To set up the state run to safely exit, run the following command:\n' 'salt {0} state.soft_kill {1}'.format(self.target_data, self.pub_data['jid'])) raise SystemExit(exit_msg) finally: if not was_listening: self.event.close_pub() def cmd_iter( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, **kwargs): ''' Yields the individual minion returns as they come in The function signature is the same as :py:meth:`cmd` with the following exceptions. Normally :py:meth:`cmd_iter` does not yield results for minions that are not connected. If you want it to return results for disconnected minions set `expect_minions=True` in `kwargs`. :return: A generator yielding the individual minion returns .. code-block:: python >>> ret = local.cmd_iter('*', 'test.ping') >>> for i in ret: ... print(i) {'jerry': {'ret': True}} {'dave': {'ret': True}} {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: if kwargs.get('yield_pub_data'): yield pub_data for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=self._get_timeout(timeout), tgt=tgt, tgt_type=tgt_type, **kwargs): if not fn_ret: continue yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_iter_no_block( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, show_jid=False, verbose=False, **kwargs): ''' Yields the individual minion returns as they come in, or None when no returns are available. 
The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A generator yielding the individual minion returns, or None when no returns are available. This allows for actions to be injected in between minion returns. .. code-block:: python >>> ret = local.cmd_iter_no_block('*', 'test.ping') >>> for i in ret: ... print(i) None {'jerry': {'ret': True}} {'dave': {'ret': True}} None {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=timeout, tgt=tgt, tgt_type=tgt_type, block=False, **kwargs): if fn_ret and any([show_jid, verbose]): for minion in fn_ret: fn_ret[minion]['jid'] = pub_data['jid'] yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_full_return( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, **kwargs): ''' Execute a salt command and return ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data return (self.get_cli_static_event_returns(pub_data['jid'], pub_data['minions'], timeout, tgt, tgt_type, verbose)) finally: if not was_listening: self.event.close_pub() def get_cli_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_jid=False, **kwargs): ''' Starts a watcher looking at the return data for a specified JID :returns: all of the information for the JID ''' if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] fret = {} # make sure the minions is a set (since we do set operations on it) 
minions = set(minions) found = set() # start this before the cache lookup-- in case new stuff comes in event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) # get the info from the cache ret = self.get_cache_returns(jid) if ret != {}: found.update(set(ret)) yield ret # if you have all the returns, stop if len(found.intersection(minions)) >= len(minions): raise StopIteration() # otherwise, get them from the event system for event in event_iter: if event != {}: found.update(set(event)) yield event if len(found.intersection(minions)) >= len(minions): self._clean_up_subscriptions(jid) raise StopIteration() # TODO: tests!! def get_returns_no_block( self, tag, match_type=None): ''' Raw function to just return events of jid excluding timeout logic Yield either the raw event data or None Pass a list of additional regular expressions as `tags_regex` to search the event bus for non-return data, such as minion lists returned from syndics. ''' while True: raw = self.event.get_event(wait=0.01, tag=tag, match_type=match_type, full=True, no_block=True, auto_reconnect=self.auto_reconnect) yield raw def get_iter_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', expect_minions=False, block=True, **kwargs): ''' Watch the event system and return job data as it comes in :returns: all of the information for the JID ''' if not isinstance(minions, set): if isinstance(minions, six.string_types): minions = set([minions]) elif isinstance(minions, (list, tuple)): minions = set(list(minions)) if timeout is None: timeout = self.opts['timeout'] gather_job_timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) start = int(time.time()) # timeouts per minion, id_ -> timeout time minion_timeouts = {} found = set() missing = set() # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the 
iteration, since the jid is invalid raise StopIteration() except Exception as exc: log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG) # Wait for the hosts to check in last_time = False # iterator for this job's return if self.opts['order_masters']: # If we are a MoM, we need to gather expected minions from downstreams masters. ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex') else: ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid)) # iterator for the info of this job jinfo_iter = [] # open event jids that need to be un-subscribed from later open_jids = set() timeout_at = time.time() + timeout gather_syndic_wait = time.time() + self.opts['syndic_wait'] # are there still minions running the job out there # start as True so that we ping at least once minions_running = True log.debug( 'get_iter_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) while True: # Process events until timeout is reached or all minions have returned for raw in ret_iter: # if we got None, then there were no events if raw is None: break if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) if 'missing' in raw.get('data', {}): missing.update(raw['data']['missing']) continue if 'return' not in raw['data']: continue if kwargs.get('raw', False): found.add(raw['data']['id']) yield raw else: found.add(raw['data']['id']) ret = {raw['data']['id']: {'ret': raw['data']['return']}} if 'out' in raw['data']: ret[raw['data']['id']]['out'] = raw['data']['out'] if 'retcode' in raw['data']: ret[raw['data']['id']]['retcode'] = raw['data']['retcode'] if 'jid' in raw['data']: ret[raw['data']['id']]['jid'] = raw['data']['jid'] if kwargs.get('_cmd_meta', False): ret[raw['data']['id']].update(raw['data']) log.debug('jid %s return from %s', jid, raw['data']['id']) yield ret # if we have all of the returns (and we aren't a syndic), no need for anything fancy if 
len(found.intersection(minions)) >= len(minions) and not self.opts['order_masters']: # All minions have returned, break out of the loop log.debug('jid %s found all minions %s', jid, found) break elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']: if len(found) >= len(minions) and len(minions) > 0 and time.time() > gather_syndic_wait: # There were some minions to find and we found them # However, this does not imply that *all* masters have yet responded with expected minion lists. # Therefore, continue to wait up to the syndic_wait period (calculated in gather_syndic_wait) to see # if additional lower-level masters deliver their lists of expected # minions. break # If we get here we may not have gathered the minion list yet. Keep waiting # for all lower-level masters to respond with their minion lists # let start the timeouts for all remaining minions for id_ in minions - found: # if we have a new minion in the list, make sure it has a timeout if id_ not in minion_timeouts: minion_timeouts[id_] = time.time() + timeout # if the jinfo has timed out and some minions are still running the job # re-do the ping if time.time() > timeout_at and minions_running: # since this is a new ping, no one has responded yet jinfo = self.gather_job_info(jid, list(minions - found), 'list', **kwargs) minions_running = False # if we weren't assigned any jid that means the master thinks # we have nothing to send if 'jid' not in jinfo: jinfo_iter = [] else: jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid'])) timeout_at = time.time() + gather_job_timeout # if you are a syndic, wait a little longer if self.opts['order_masters']: timeout_at += self.opts.get('syndic_wait', 1) # check for minions that are running the job still for raw in jinfo_iter: # if there are no more events, lets stop waiting for the jinfo if raw is None: break try: if raw['data']['retcode'] > 0: log.error('saltutil returning errors on minion %s', raw['data']['id']) 
minions.remove(raw['data']['id']) break except KeyError as exc: # This is a safe pass. We're just using the try/except to # avoid having to deep-check for keys. missing_key = exc.__str__().strip('\'"') if missing_key == 'retcode': log.debug('retcode missing from client return') else: log.debug( 'Passing on saltutil error. Key \'%s\' missing ' 'from client return. This may be an error in ' 'the client.', missing_key ) # Keep track of the jid events to unsubscribe from later open_jids.add(jinfo['jid']) # TODO: move to a library?? if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue if 'syndic' in raw.get('data', {}): minions.update(raw['syndic']) continue if 'return' not in raw.get('data', {}): continue # if the job isn't running there anymore... don't count if raw['data']['return'] == {}: continue # if the minion throws an exception containing the word "return" # the master will try to handle the string as a dict in the next # step. Check if we have a string, log the issue and continue. 
if isinstance(raw['data']['return'], six.string_types): log.error("unexpected return from minion: %s", raw) continue if 'return' in raw['data']['return'] and \ raw['data']['return']['return'] == {}: continue # if we didn't originally target the minion, lets add it to the list if raw['data']['id'] not in minions: minions.add(raw['data']['id']) # update this minion's timeout, as long as the job is still running minion_timeouts[raw['data']['id']] = time.time() + timeout # a minion returned, so we know its running somewhere minions_running = True # if we have hit gather_job_timeout (after firing the job) AND # if we have hit all minion timeouts, lets call it now = time.time() # if we have finished waiting, and no minions are running the job # then we need to see if each minion has timedout done = (now > timeout_at) and not minions_running if done: # if all minions have timeod out for id_ in minions - found: if now < minion_timeouts[id_]: done = False break if done: break # don't spin if block: time.sleep(0.01) else: yield # If there are any remaining open events, clean them up. if open_jids: for jid in open_jids: self.event.unsubscribe(jid) if expect_minions: for minion in list((minions - found)): yield {minion: {'failed': True}} # Filter out any minions marked as missing for which we received # returns (prevents false events sent due to higher-level masters not # knowing about lower-level minions). 
missing -= found # Report on missing minions if missing: for minion in missing: yield {minion: {'failed': True}} def get_returns( self, jid, minions, timeout=None): ''' Get the returns for the command line interface via the event system ''' minions = set(minions) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout log.debug( 'get_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Master job cache returner [{0}] failed to verify jid. ' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: time_left = timeout_at - int(time.time()) wait = max(1, time_left) raw = self.event.get_event(wait, jid, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: found.add(raw['id']) ret[raw['id']] = raw['return'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break if int(time.time()) > timeout_at: log.info( 'jid %s minions %s did not return in time', jid, (minions - found) ) break time.sleep(0.01) return ret def get_full_returns(self, jid, minions, timeout=None): ''' This method starts off a watcher looking at the return data for a specified jid, it returns all of the information for the jid ''' # TODO: change this from ret to return... or the other way. 
# Its inconsistent, we should pick one ret = {} # create the iterator-- since we want to get anyone in the middle event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Returner {0} could not fetch jid data. ' 'Exception details: {1}'.format( self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # if we have all the minion returns, lets just return if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise lets use the listener we created above to get the rest for event_ret in event_iter: # if nothing in the event_ret, skip if event_ret == {}: time.sleep(0.02) continue for minion, m_data in six.iteritems(event_ret): if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # are we done yet? if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise we hit the timeout, return what we have return ret def get_cache_returns(self, jid): ''' Execute a single pass to gather the contents of the job cache ''' ret = {} try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Could not examine master job cache. ' 'Error occurred in {0} returner. 
' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data return ret def get_cli_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, progress=False, show_timeout=False, show_jid=False, **kwargs): ''' Get the returns for the command line interface via the event system ''' log.trace('func get_cli_event_returns()') if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) # lazy load the connected minions connected_minions = None return_count = 0 for ret in self.get_iter_returns(jid, minions, timeout=timeout, tgt=tgt, tgt_type=tgt_type, # (gtmanfred) expect_minions is popped here incase it is passed from a client # call. If this is not popped, then it would be passed twice to # get_iter_returns. expect_minions=(kwargs.pop('expect_minions', False) or verbose or show_timeout), **kwargs ): log.debug('return event: %s', ret) return_count = return_count + 1 if progress: for id_, min_ret in six.iteritems(ret): if not min_ret.get('failed') is True: yield {'minion_count': len(minions), 'return_count': return_count} # replace the return structure for missing minions for id_, min_ret in six.iteritems(ret): if min_ret.get('failed') is True: if connected_minions is None: connected_minions = salt.utils.minions.CkMinions(self.opts).connected_ids() if self.opts['minion_data_cache'] \ and salt.cache.factory(self.opts).contains('minions/{0}'.format(id_), 'data') \ and connected_minions \ and id_ not in connected_minions: yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. 
[Not connected]', 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: # don't report syndics as unresponsive minions if not os.path.exists(os.path.join(self.opts['syndic_dir'], id_)): yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [No response]' '\nThe minions may not have all finished running and any ' 'remaining minions will return upon completion. To look ' 'up the return data for this job later, run the following ' 'command:\n\n' 'salt-run jobs.lookup_jid {0}'.format(jid), 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: yield {id_: min_ret} self._clean_up_subscriptions(jid) def get_event_iter_returns(self, jid, minions, timeout=None): ''' Gather the return data from the event system, break hard when timeout is reached. ''' log.trace('entered - function get_event_iter_returns()') if timeout is None: timeout = self.opts['timeout'] timeout_at = time.time() + timeout found = set() # Check to see if the jid is real, if not return the empty dict if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() # Wait for the hosts to check in while True: raw = self.event.get_event(timeout, auto_reconnect=self.auto_reconnect) if raw is None or time.time() > timeout_at: # Timeout reached break if 'minions' in raw.get('data', {}): continue try: found.add(raw['id']) ret = {raw['id']: {'ret': raw['return']}} except KeyError: # Ignore other erroneous messages continue if 'out' in raw: ret[raw['id']]['out'] = raw['out'] yield ret time.sleep(0.02) def _prep_pub(self, tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs): ''' Set up the payload_kwargs to be sent down to the master ''' if tgt_type == 'nodegroup': if tgt not in self.opts['nodegroups']: conf_file = self.opts.get( 'conf_file', 'the master config file' ) raise SaltInvocationError( 'Node group {0} unavailable in {1}'.format( tgt, conf_file ) ) tgt = 
salt.utils.minions.nodegroup_comp(tgt, self.opts['nodegroups']) tgt_type = 'compound' # Convert a range expression to a list of nodes and change expression # form to list if tgt_type == 'range' and HAS_RANGE: tgt = self._convert_range_to_list(tgt) tgt_type = 'list' # If an external job cache is specified add it to the ret list if self.opts.get('ext_job_cache'): if ret: ret += ',{0}'.format(self.opts['ext_job_cache']) else: ret = self.opts['ext_job_cache'] # format the payload - make a function that does this in the payload # module # Generate the standard keyword args to feed to format_payload payload_kwargs = {'cmd': 'publish', 'tgt': tgt, 'fun': fun, 'arg': arg, 'key': self.key, 'tgt_type': tgt_type, 'ret': ret, 'jid': jid} # if kwargs are passed, pack them. if kwargs: payload_kwargs['kwargs'] = kwargs # If we have a salt user, add it to the payload if self.opts['syndic_master'] and 'user' in kwargs: payload_kwargs['user'] = kwargs['user'] elif self.salt_user: payload_kwargs['user'] = self.salt_user # If we're a syndication master, pass the timeout if self.opts['order_masters']: payload_kwargs['to'] = timeout return payload_kwargs def pub(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, listen=False, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. 
''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.ReqChannel.factory(self.opts, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. # If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError as err: log.error(err) raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! 
Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: return payload self.key = key payload_kwargs['key'] = self.key payload = channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: return payload # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() return {'jid': payload['load']['jid'], 'minions': payload['load']['minions']} @tornado.gen.coroutine def pub_async(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, io_loop=None, listen=True, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. 
''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.AsyncReqChannel.factory(self.opts, io_loop=io_loop, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. # If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = yield channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError: raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! 
Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: raise tornado.gen.Return(payload) self.key = key payload_kwargs['key'] = self.key payload = yield channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: raise tornado.gen.Return(payload) # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() raise tornado.gen.Return({'jid': payload['load']['jid'], 'minions': payload['load']['minions']}) def __del__(self): # This IS really necessary! # When running tests, if self.events is not destroyed, we leak 2 # threads per test case which uses self.client if hasattr(self, 'event'): # The call below will take care of calling 'self.event.destroy()' del self.event def _clean_up_subscriptions(self, job_id): if self.opts.get('order_masters'): self.event.unsubscribe('syndic/.*/{0}'.format(job_id), 'regex') self.event.unsubscribe('salt/job/{0}'.format(job_id))
saltstack/salt
salt/client/__init__.py
LocalClient.get_cli_event_returns
python
def get_cli_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, progress=False, show_timeout=False, show_jid=False, **kwargs): ''' Get the returns for the command line interface via the event system ''' log.trace('func get_cli_event_returns()') if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) # lazy load the connected minions connected_minions = None return_count = 0 for ret in self.get_iter_returns(jid, minions, timeout=timeout, tgt=tgt, tgt_type=tgt_type, # (gtmanfred) expect_minions is popped here incase it is passed from a client # call. If this is not popped, then it would be passed twice to # get_iter_returns. expect_minions=(kwargs.pop('expect_minions', False) or verbose or show_timeout), **kwargs ): log.debug('return event: %s', ret) return_count = return_count + 1 if progress: for id_, min_ret in six.iteritems(ret): if not min_ret.get('failed') is True: yield {'minion_count': len(minions), 'return_count': return_count} # replace the return structure for missing minions for id_, min_ret in six.iteritems(ret): if min_ret.get('failed') is True: if connected_minions is None: connected_minions = salt.utils.minions.CkMinions(self.opts).connected_ids() if self.opts['minion_data_cache'] \ and salt.cache.factory(self.opts).contains('minions/{0}'.format(id_), 'data') \ and connected_minions \ and id_ not in connected_minions: yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [Not connected]', 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: # don't report syndics as unresponsive minions if not os.path.exists(os.path.join(self.opts['syndic_dir'], id_)): yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [No response]' '\nThe minions may not have all finished running and any ' 'remaining minions will return upon completion. 
To look ' 'up the return data for this job later, run the following ' 'command:\n\n' 'salt-run jobs.lookup_jid {0}'.format(jid), 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: yield {id_: min_ret} self._clean_up_subscriptions(jid)
Get the returns for the command line interface via the event system
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L1514-L1594
[ "def iteritems(d, **kw):\n return d.iteritems(**kw)\n", "def factory(opts, **kwargs):\n '''\n Creates and returns the cache class.\n If memory caching is enabled by opts MemCache class will be instantiated.\n If not Cache class will be returned.\n '''\n if opts.get('memcache_expire_seconds', 0):\n cls = MemCache\n else:\n cls = Cache\n return cls(opts, **kwargs)\n", "def contains(self, bank, key=None):\n '''\n Checks if the specified bank contains the specified key.\n\n :param bank:\n The name of the location inside the cache which will hold the key\n and its associated data.\n\n :param key:\n The name of the key (or file inside a directory) which will hold\n the data. File extensions should not be provided, as they will be\n added by the driver itself.\n\n :return:\n Returns True if the specified key exists in the given bank and False\n if not.\n If key is None checks for the bank existense.\n\n :raises SaltCacheError:\n Raises an exception if cache driver detected an error accessing data\n in the cache backend (auth, permissions, etc).\n '''\n fun = '{0}.contains'.format(self.driver)\n return self.modules[fun](bank, key, **self._kwargs)\n", "def get_iter_returns(\n self,\n jid,\n minions,\n timeout=None,\n tgt='*',\n tgt_type='glob',\n expect_minions=False,\n block=True,\n **kwargs):\n '''\n Watch the event system and return job data as it comes in\n\n :returns: all of the information for the JID\n '''\n if not isinstance(minions, set):\n if isinstance(minions, six.string_types):\n minions = set([minions])\n elif isinstance(minions, (list, tuple)):\n minions = set(list(minions))\n\n if timeout is None:\n timeout = self.opts['timeout']\n gather_job_timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout']))\n start = int(time.time())\n\n # timeouts per minion, id_ -> timeout time\n minion_timeouts = {}\n\n found = set()\n missing = set()\n # Check to see if the jid is real, if not return the empty dict\n try:\n if 
self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}:\n log.warning('jid does not exist')\n yield {}\n # stop the iteration, since the jid is invalid\n raise StopIteration()\n except Exception as exc:\n log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG)\n # Wait for the hosts to check in\n last_time = False\n # iterator for this job's return\n if self.opts['order_masters']:\n # If we are a MoM, we need to gather expected minions from downstreams masters.\n ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex')\n else:\n ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid))\n # iterator for the info of this job\n jinfo_iter = []\n # open event jids that need to be un-subscribed from later\n open_jids = set()\n timeout_at = time.time() + timeout\n gather_syndic_wait = time.time() + self.opts['syndic_wait']\n # are there still minions running the job out there\n # start as True so that we ping at least once\n minions_running = True\n log.debug(\n 'get_iter_returns for jid %s sent to %s will timeout at %s',\n jid, minions, datetime.fromtimestamp(timeout_at).time()\n )\n while True:\n # Process events until timeout is reached or all minions have returned\n for raw in ret_iter:\n # if we got None, then there were no events\n if raw is None:\n break\n if 'minions' in raw.get('data', {}):\n minions.update(raw['data']['minions'])\n if 'missing' in raw.get('data', {}):\n missing.update(raw['data']['missing'])\n continue\n if 'return' not in raw['data']:\n continue\n if kwargs.get('raw', False):\n found.add(raw['data']['id'])\n yield raw\n else:\n found.add(raw['data']['id'])\n ret = {raw['data']['id']: {'ret': raw['data']['return']}}\n if 'out' in raw['data']:\n ret[raw['data']['id']]['out'] = raw['data']['out']\n if 'retcode' in raw['data']:\n ret[raw['data']['id']]['retcode'] = raw['data']['retcode']\n if 'jid' in raw['data']:\n ret[raw['data']['id']]['jid'] = 
raw['data']['jid']\n if kwargs.get('_cmd_meta', False):\n ret[raw['data']['id']].update(raw['data'])\n log.debug('jid %s return from %s', jid, raw['data']['id'])\n yield ret\n\n # if we have all of the returns (and we aren't a syndic), no need for anything fancy\n if len(found.intersection(minions)) >= len(minions) and not self.opts['order_masters']:\n # All minions have returned, break out of the loop\n log.debug('jid %s found all minions %s', jid, found)\n break\n elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']:\n if len(found) >= len(minions) and len(minions) > 0 and time.time() > gather_syndic_wait:\n # There were some minions to find and we found them\n # However, this does not imply that *all* masters have yet responded with expected minion lists.\n # Therefore, continue to wait up to the syndic_wait period (calculated in gather_syndic_wait) to see\n # if additional lower-level masters deliver their lists of expected\n # minions.\n break\n # If we get here we may not have gathered the minion list yet. 
Keep waiting\n # for all lower-level masters to respond with their minion lists\n\n # let start the timeouts for all remaining minions\n\n for id_ in minions - found:\n # if we have a new minion in the list, make sure it has a timeout\n if id_ not in minion_timeouts:\n minion_timeouts[id_] = time.time() + timeout\n\n # if the jinfo has timed out and some minions are still running the job\n # re-do the ping\n if time.time() > timeout_at and minions_running:\n # since this is a new ping, no one has responded yet\n jinfo = self.gather_job_info(jid, list(minions - found), 'list', **kwargs)\n minions_running = False\n # if we weren't assigned any jid that means the master thinks\n # we have nothing to send\n if 'jid' not in jinfo:\n jinfo_iter = []\n else:\n jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid']))\n timeout_at = time.time() + gather_job_timeout\n # if you are a syndic, wait a little longer\n if self.opts['order_masters']:\n timeout_at += self.opts.get('syndic_wait', 1)\n\n # check for minions that are running the job still\n for raw in jinfo_iter:\n # if there are no more events, lets stop waiting for the jinfo\n if raw is None:\n break\n try:\n if raw['data']['retcode'] > 0:\n log.error('saltutil returning errors on minion %s', raw['data']['id'])\n minions.remove(raw['data']['id'])\n break\n except KeyError as exc:\n # This is a safe pass. We're just using the try/except to\n # avoid having to deep-check for keys.\n missing_key = exc.__str__().strip('\\'\"')\n if missing_key == 'retcode':\n log.debug('retcode missing from client return')\n else:\n log.debug(\n 'Passing on saltutil error. Key \\'%s\\' missing '\n 'from client return. 
This may be an error in '\n 'the client.', missing_key\n )\n # Keep track of the jid events to unsubscribe from later\n open_jids.add(jinfo['jid'])\n\n # TODO: move to a library??\n if 'minions' in raw.get('data', {}):\n minions.update(raw['data']['minions'])\n continue\n if 'syndic' in raw.get('data', {}):\n minions.update(raw['syndic'])\n continue\n if 'return' not in raw.get('data', {}):\n continue\n\n # if the job isn't running there anymore... don't count\n if raw['data']['return'] == {}:\n continue\n\n # if the minion throws an exception containing the word \"return\"\n # the master will try to handle the string as a dict in the next\n # step. Check if we have a string, log the issue and continue.\n if isinstance(raw['data']['return'], six.string_types):\n log.error(\"unexpected return from minion: %s\", raw)\n continue\n\n if 'return' in raw['data']['return'] and \\\n raw['data']['return']['return'] == {}:\n continue\n\n # if we didn't originally target the minion, lets add it to the list\n if raw['data']['id'] not in minions:\n minions.add(raw['data']['id'])\n # update this minion's timeout, as long as the job is still running\n minion_timeouts[raw['data']['id']] = time.time() + timeout\n # a minion returned, so we know its running somewhere\n minions_running = True\n\n # if we have hit gather_job_timeout (after firing the job) AND\n # if we have hit all minion timeouts, lets call it\n now = time.time()\n # if we have finished waiting, and no minions are running the job\n # then we need to see if each minion has timedout\n done = (now > timeout_at) and not minions_running\n if done:\n # if all minions have timeod out\n for id_ in minions - found:\n if now < minion_timeouts[id_]:\n done = False\n break\n if done:\n break\n\n # don't spin\n if block:\n time.sleep(0.01)\n else:\n yield\n\n # If there are any remaining open events, clean them up.\n if open_jids:\n for jid in open_jids:\n self.event.unsubscribe(jid)\n\n if expect_minions:\n for minion in 
list((minions - found)):\n yield {minion: {'failed': True}}\n\n # Filter out any minions marked as missing for which we received\n # returns (prevents false events sent due to higher-level masters not\n # knowing about lower-level minions).\n missing -= found\n\n # Report on missing minions\n if missing:\n for minion in missing:\n yield {minion: {'failed': True}}\n", "def _clean_up_subscriptions(self, job_id):\n if self.opts.get('order_masters'):\n self.event.unsubscribe('syndic/.*/{0}'.format(job_id), 'regex')\n self.event.unsubscribe('salt/job/{0}'.format(job_id))\n", "def connected_ids(self, subset=None, show_ip=False, show_ipv4=None, include_localhost=None):\n '''\n Return a set of all connected minion ids, optionally within a subset\n '''\n if include_localhost is not None:\n salt.utils.versions.warn_until(\n 'Sodium',\n 'The \\'include_localhost\\' argument is no longer required; any'\n 'connected localhost minion will always be included.'\n )\n if show_ipv4 is not None:\n salt.utils.versions.warn_until(\n 'Sodium',\n 'The \\'show_ipv4\\' argument has been renamed to \\'show_ip\\' as'\n 'it now also includes IPv6 addresses for IPv6-connected'\n 'minions.'\n )\n minions = set()\n if self.opts.get('minion_data_cache', False):\n search = self.cache.list('minions')\n if search is None:\n return minions\n addrs = salt.utils.network.local_port_tcp(int(self.opts['publish_port']))\n if '127.0.0.1' in addrs:\n # Add in the address of a possible locally-connected minion.\n addrs.discard('127.0.0.1')\n addrs.update(set(salt.utils.network.ip_addrs(include_loopback=False)))\n if '::1' in addrs:\n # Add in the address of a possible locally-connected minion.\n addrs.discard('::1')\n addrs.update(set(salt.utils.network.ip_addrs6(include_loopback=False)))\n if subset:\n search = subset\n for id_ in search:\n try:\n mdata = self.cache.fetch('minions/{0}'.format(id_), 'data')\n except SaltCacheError:\n # If a SaltCacheError is explicitly raised during the fetch operation,\n # 
permission was denied to open the cached data.p file. Continue on as\n # in the releases <= 2016.3. (An explicit error raise was added in PR\n # #35388. See issue #36867 for more information.\n continue\n if mdata is None:\n continue\n grains = mdata.get('grains', {})\n for ipv4 in grains.get('ipv4', []):\n if ipv4 in addrs:\n if show_ip:\n minions.add((id_, ipv4))\n else:\n minions.add(id_)\n break\n for ipv6 in grains.get('ipv6', []):\n if ipv6 in addrs:\n if show_ip:\n minions.add((id_, ipv6))\n else:\n minions.add(id_)\n break\n return minions\n" ]
class LocalClient(object): ''' The interface used by the :command:`salt` CLI tool on the Salt Master ``LocalClient`` is used to send a command to Salt minions to execute :ref:`execution modules <all-salt.modules>` and return the results to the Salt Master. Importing and using ``LocalClient`` must be done on the same machine as the Salt Master and it must be done using the same user that the Salt Master is running as. (Unless :conf_master:`external_auth` is configured and authentication credentials are included in the execution). .. note:: The LocalClient uses a Tornado IOLoop, this can create issues when using the LocalClient inside an existing IOLoop. If creating the LocalClient in partnership with another IOLoop either create the IOLoop before creating the LocalClient, or when creating the IOLoop use ioloop.current() which will return the ioloop created by LocalClient. .. code-block:: python import salt.client local = salt.client.LocalClient() local.cmd('*', 'test.fib', [10]) ''' def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, 'master'), mopts=None, skip_perm_errors=False, io_loop=None, keep_loop=False, auto_reconnect=False): ''' :param IOLoop io_loop: io_loop used for events. Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. 
''' if mopts: self.opts = mopts else: if os.path.isdir(c_path): log.warning( '%s expects a file path not a directory path(%s) to ' 'its \'c_path\' keyword argument', self.__class__.__name__, c_path ) self.opts = salt.config.client_config(c_path) self.serial = salt.payload.Serial(self.opts) self.salt_user = salt.utils.user.get_specific_user() self.skip_perm_errors = skip_perm_errors self.key = self.__read_master_key() self.auto_reconnect = auto_reconnect self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False, io_loop=io_loop, keep_loop=keep_loop) self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils) self.returners = salt.loader.returners(self.opts, self.functions) def __read_master_key(self): ''' Read in the rotating master authentication key ''' key_user = self.salt_user if key_user == 'root': if self.opts.get('user', 'root') != 'root': key_user = self.opts.get('user', 'root') if key_user.startswith('sudo_'): key_user = self.opts.get('user', 'root') if salt.utils.platform.is_windows(): # The username may contain '\' if it is in Windows # 'DOMAIN\username' format. Fix this for the keyfile path. 
key_user = key_user.replace('\\', '_') keyfile = os.path.join(self.opts['cachedir'], '.{0}_key'.format(key_user)) try: # Make sure all key parent directories are accessible salt.utils.verify.check_path_traversal(self.opts['cachedir'], key_user, self.skip_perm_errors) with salt.utils.files.fopen(keyfile, 'r') as key: return salt.utils.stringutils.to_unicode(key.read()) except (OSError, IOError, SaltClientError): # Fall back to eauth return '' def _convert_range_to_list(self, tgt): ''' convert a seco.range range into a list target ''' range_ = seco.range.Range(self.opts['range_server']) try: return range_.expand(tgt) except seco.range.RangeException as err: print('Range server exception: {0}'.format(err)) return [] def _get_timeout(self, timeout): ''' Return the timeout to use ''' if timeout is None: return self.opts['timeout'] if isinstance(timeout, int): return timeout if isinstance(timeout, six.string_types): try: return int(timeout) except ValueError: return self.opts['timeout'] # Looks like the timeout is invalid, use config return self.opts['timeout'] def gather_job_info(self, jid, tgt, tgt_type, listen=True, **kwargs): ''' Return the information about a given job ''' log.debug('Checking whether jid %s is still running', jid) timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) pub_data = self.run_job(tgt, 'saltutil.find_job', arg=[jid], tgt_type=tgt_type, timeout=timeout, listen=listen, **kwargs ) if 'jid' in pub_data: self.event.subscribe(pub_data['jid']) return pub_data def _check_pub_data(self, pub_data, listen=True): ''' Common checks on the pub_data data structure returned from running pub ''' if pub_data == '': # Failed to authenticate, this could be a bunch of things raise EauthAuthenticationError( 'Failed to authenticate! This is most likely because this ' 'user is not permitted to execute commands, but there is a ' 'small possibility that a disk error occurred (check ' 'disk/inode usage).' 
) # Failed to connect to the master and send the pub if 'error' in pub_data: print(pub_data['error']) log.debug('_check_pub_data() error: %s', pub_data['error']) return {} elif 'jid' not in pub_data: return {} if pub_data['jid'] == '0': print('Failed to connect to the Master, ' 'is the Salt Master running?') return {} # If we order masters (via a syndic), don't short circuit if no minions # are found if not self.opts.get('order_masters'): # Check for no minions if not pub_data['minions']: print('No minions matched the target. ' 'No command was sent, no jid was assigned.') return {} # don't install event subscription listeners when the request is asynchronous # and doesn't care. this is important as it will create event leaks otherwise if not listen: return pub_data if self.opts.get('order_masters'): self.event.subscribe('syndic/.*/{0}'.format(pub_data['jid']), 'regex') self.event.subscribe('salt/job/{0}'.format(pub_data['jid'])) return pub_data def run_job( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=False, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = self.pub( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) return self._check_pub_data(pub_data, listen=listen) def gather_minions(self, tgt, expr_form): _res = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form) return _res['minions'] @tornado.gen.coroutine def run_job_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=True, io_loop=None, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job_async('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = yield self.pub_async( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), io_loop=io_loop, listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) raise tornado.gen.Return(self._check_pub_data(pub_data, listen=listen)) def cmd_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', kwarg=None, **kwargs): ''' Asynchronously send a command to connected minions The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A job ID or 0 on failure. .. code-block:: python >>> local.cmd_async('*', 'test.sleep', [300]) '20131219215921857715' ''' pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, jid=jid, kwarg=kwarg, listen=False, **kwargs) try: return pub_data['jid'] except KeyError: return 0 def cmd_subset( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, sub=3, cli=False, progress=False, full_return=False, **kwargs): ''' Execute a command on a random subset of the targeted systems The function signature is the same as :py:meth:`cmd` with the following exceptions. :param sub: The number of systems to execute on :param cli: When this is set to True, a generator is returned, otherwise a dictionary of the minion returns is returned .. 
code-block:: python >>> SLC.cmd_subset('*', 'test.ping', sub=1) {'jerry': True} ''' minion_ret = self.cmd(tgt, 'sys.list_functions', tgt_type=tgt_type, **kwargs) minions = list(minion_ret) random.shuffle(minions) f_tgt = [] for minion in minions: if fun in minion_ret[minion]: f_tgt.append(minion) if len(f_tgt) >= sub: break func = self.cmd if cli: func = self.cmd_cli return func( f_tgt, fun, arg, tgt_type='list', ret=ret, kwarg=kwarg, progress=progress, full_return=full_return, **kwargs) def cmd_batch( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, batch='10%', **kwargs): ''' Iteratively execute a command on subsets of minions at a time The function signature is the same as :py:meth:`cmd` with the following exceptions. :param batch: The batch identifier of systems to execute on :returns: A generator of minion returns .. code-block:: python >>> returns = local.cmd_batch('*', 'state.highstate', batch='10%') >>> for ret in returns: ... print(ret) {'jerry': {...}} {'dave': {...}} {'stewart': {...}} ''' # Late import - not used anywhere else in this file import salt.cli.batch opts = salt.cli.batch.batch_get_opts( tgt, fun, batch, self.opts, arg=arg, tgt_type=tgt_type, ret=ret, kwarg=kwarg, **kwargs) eauth = salt.cli.batch.batch_get_eauth(kwargs) arg = salt.utils.args.parse_input(arg, kwargs=kwarg) opts = {'tgt': tgt, 'fun': fun, 'arg': arg, 'tgt_type': tgt_type, 'ret': ret, 'batch': batch, 'failhard': kwargs.get('failhard', False), 'raw': kwargs.get('raw', False)} if 'timeout' in kwargs: opts['timeout'] = kwargs['timeout'] if 'gather_job_timeout' in kwargs: opts['gather_job_timeout'] = kwargs['gather_job_timeout'] if 'batch_wait' in kwargs: opts['batch_wait'] = int(kwargs['batch_wait']) eauth = {} if 'eauth' in kwargs: eauth['eauth'] = kwargs.pop('eauth') if 'username' in kwargs: eauth['username'] = kwargs.pop('username') if 'password' in kwargs: eauth['password'] = kwargs.pop('password') if 'token' in kwargs: eauth['token'] = kwargs.pop('token') for key, 
val in six.iteritems(self.opts): if key not in opts: opts[key] = val batch = salt.cli.batch.Batch(opts, eauth=eauth, quiet=True) for ret in batch.run(): yield ret def cmd(self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', jid='', full_return=False, kwarg=None, **kwargs): ''' Synchronously execute a command on targeted minions The cmd method will execute and wait for the timeout period for all minions to reply, then it will return all minion data at once. .. code-block:: python >>> import salt.client >>> local = salt.client.LocalClient() >>> local.cmd('*', 'cmd.run', ['whoami']) {'jerry': 'root'} With extra keyword arguments for the command function to be run: .. code-block:: python local.cmd('*', 'test.arg', ['arg1', 'arg2'], kwarg={'foo': 'bar'}) Compound commands can be used for multiple executions in a single publish. Function names and function arguments are provided in separate lists but the index values must correlate and an empty list must be used if no arguments are required. .. code-block:: python >>> local.cmd('*', [ 'grains.items', 'sys.doc', 'cmd.run', ], [ [], [], ['uptime'], ]) :param tgt: Which minions to target for the execution. Default is shell glob. Modified by the ``tgt_type`` option. :type tgt: string or list :param fun: The module and function to call on the specified minions of the form ``module.function``. For example ``test.ping`` or ``grains.items``. Compound commands Multiple functions may be called in a single publish by passing a list of commands. This can dramatically lower overhead and speed up the application communicating with Salt. This requires that the ``arg`` param is a list of lists. The ``fun`` list and the ``arg`` list must correlate by index meaning a function that does not take arguments must still have a corresponding empty list at the expected index. :type fun: string or list of strings :param arg: A list of arguments to pass to the remote function. 
If the function takes no arguments ``arg`` may be omitted except when executing a compound command. :type arg: list or list-of-lists :param timeout: Seconds to wait after the last minion returns but before all minions return. :param tgt_type: The type of ``tgt``. Allowed values: * ``glob`` - Bash glob completion - Default * ``pcre`` - Perl style regular expression * ``list`` - Python list of hosts * ``grain`` - Match based on a grain comparison * ``grain_pcre`` - Grain comparison with a regex * ``pillar`` - Pillar data comparison * ``pillar_pcre`` - Pillar data comparison with a regex * ``nodegroup`` - Match on nodegroup * ``range`` - Use a Range server for matching * ``compound`` - Pass a compound match string * ``ipcidr`` - Match based on Subnet (CIDR notation) or IPv4 address. .. versionchanged:: 2017.7.0 Renamed from ``expr_form`` to ``tgt_type`` :param ret: The returner to use. The value passed can be single returner, or a comma delimited list of returners to call in order on the minions :param kwarg: A dictionary with keyword arguments for the function. :param full_return: Output the job return only (default) or the full return including exit code and other job metadata. :param kwargs: Optional keyword arguments. Authentication credentials may be passed when using :conf_master:`external_auth`. For example: ``local.cmd('*', 'test.ping', username='saltdev', password='saltdev', eauth='pam')``. Or: ``local.cmd('*', 'test.ping', token='5871821ea51754fdcea8153c1c745433')`` :returns: A dictionary with the result of the execution, keyed by minion ID. A compound command will return a sub-dictionary keyed by function name. 
''' was_listening = self.event.cpub try: pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, timeout, jid, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data ret = {} for fn_ret in self.get_cli_event_returns( pub_data['jid'], pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, **kwargs): if fn_ret: for mid, data in six.iteritems(fn_ret): ret[mid] = (data if full_return else data.get('ret', {})) for failed in list(set(pub_data['minions']) - set(ret)): ret[failed] = False return ret finally: if not was_listening: self.event.close_pub() def cmd_cli( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, progress=False, **kwargs): ''' Used by the :command:`salt` CLI. This method returns minion returns as they come back and attempts to block until all minions return. The function signature is the same as :py:meth:`cmd` with the following exceptions. :param verbose: Print extra information about the running command :returns: A generator ''' was_listening = self.event.cpub if fun.startswith('state.'): ref = {'compound': '-C', 'glob': '', 'grain': '-G', 'grain_pcre': '-P', 'ipcidr': '-S', 'list': '-L', 'nodegroup': '-N', 'pcre': '-E', 'pillar': '-I', 'pillar_pcre': '-J'} if HAS_RANGE: ref['range'] = '-R' if ref[tgt_type].startswith('-'): self.target_data = "{0} '{1}'".format( ref[tgt_type], ','.join(tgt) if isinstance(tgt, list) else tgt) else: self.target_data = ','.join(tgt) if isinstance(tgt, list) else tgt else: self.target_data = '' try: self.pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not self.pub_data: yield self.pub_data else: try: for fn_ret in self.get_cli_event_returns( self.pub_data['jid'], self.pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, verbose, progress, **kwargs): if not fn_ret: continue yield fn_ret except KeyboardInterrupt: exit_msg = ( '\nExiting gracefully on Ctrl-c' '\n' 'This job\'s jid is: {0}\n' 'The 
minions may not have all finished running and any ' 'remaining minions will return upon completion.\n\n' 'To look up the return data for this job later, run the ' 'following command:\n' 'salt-run jobs.lookup_jid {0}'.format(self.pub_data['jid'])) if self.target_data: exit_msg += ( '\n\n' 'To set up the state run to safely exit, run the following command:\n' 'salt {0} state.soft_kill {1}'.format(self.target_data, self.pub_data['jid'])) raise SystemExit(exit_msg) finally: if not was_listening: self.event.close_pub() def cmd_iter( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, **kwargs): ''' Yields the individual minion returns as they come in The function signature is the same as :py:meth:`cmd` with the following exceptions. Normally :py:meth:`cmd_iter` does not yield results for minions that are not connected. If you want it to return results for disconnected minions set `expect_minions=True` in `kwargs`. :return: A generator yielding the individual minion returns .. code-block:: python >>> ret = local.cmd_iter('*', 'test.ping') >>> for i in ret: ... print(i) {'jerry': {'ret': True}} {'dave': {'ret': True}} {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: if kwargs.get('yield_pub_data'): yield pub_data for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=self._get_timeout(timeout), tgt=tgt, tgt_type=tgt_type, **kwargs): if not fn_ret: continue yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_iter_no_block( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, show_jid=False, verbose=False, **kwargs): ''' Yields the individual minion returns as they come in, or None when no returns are available. 
The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A generator yielding the individual minion returns, or None when no returns are available. This allows for actions to be injected in between minion returns. .. code-block:: python >>> ret = local.cmd_iter_no_block('*', 'test.ping') >>> for i in ret: ... print(i) None {'jerry': {'ret': True}} {'dave': {'ret': True}} None {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=timeout, tgt=tgt, tgt_type=tgt_type, block=False, **kwargs): if fn_ret and any([show_jid, verbose]): for minion in fn_ret: fn_ret[minion]['jid'] = pub_data['jid'] yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_full_return( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, **kwargs): ''' Execute a salt command and return ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data return (self.get_cli_static_event_returns(pub_data['jid'], pub_data['minions'], timeout, tgt, tgt_type, verbose)) finally: if not was_listening: self.event.close_pub() def get_cli_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_jid=False, **kwargs): ''' Starts a watcher looking at the return data for a specified JID :returns: all of the information for the JID ''' if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] fret = {} # make sure the minions is a set (since we do set operations on it) 
minions = set(minions) found = set() # start this before the cache lookup-- in case new stuff comes in event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) # get the info from the cache ret = self.get_cache_returns(jid) if ret != {}: found.update(set(ret)) yield ret # if you have all the returns, stop if len(found.intersection(minions)) >= len(minions): raise StopIteration() # otherwise, get them from the event system for event in event_iter: if event != {}: found.update(set(event)) yield event if len(found.intersection(minions)) >= len(minions): self._clean_up_subscriptions(jid) raise StopIteration() # TODO: tests!! def get_returns_no_block( self, tag, match_type=None): ''' Raw function to just return events of jid excluding timeout logic Yield either the raw event data or None Pass a list of additional regular expressions as `tags_regex` to search the event bus for non-return data, such as minion lists returned from syndics. ''' while True: raw = self.event.get_event(wait=0.01, tag=tag, match_type=match_type, full=True, no_block=True, auto_reconnect=self.auto_reconnect) yield raw def get_iter_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', expect_minions=False, block=True, **kwargs): ''' Watch the event system and return job data as it comes in :returns: all of the information for the JID ''' if not isinstance(minions, set): if isinstance(minions, six.string_types): minions = set([minions]) elif isinstance(minions, (list, tuple)): minions = set(list(minions)) if timeout is None: timeout = self.opts['timeout'] gather_job_timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) start = int(time.time()) # timeouts per minion, id_ -> timeout time minion_timeouts = {} found = set() missing = set() # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the 
iteration, since the jid is invalid raise StopIteration() except Exception as exc: log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG) # Wait for the hosts to check in last_time = False # iterator for this job's return if self.opts['order_masters']: # If we are a MoM, we need to gather expected minions from downstreams masters. ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex') else: ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid)) # iterator for the info of this job jinfo_iter = [] # open event jids that need to be un-subscribed from later open_jids = set() timeout_at = time.time() + timeout gather_syndic_wait = time.time() + self.opts['syndic_wait'] # are there still minions running the job out there # start as True so that we ping at least once minions_running = True log.debug( 'get_iter_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) while True: # Process events until timeout is reached or all minions have returned for raw in ret_iter: # if we got None, then there were no events if raw is None: break if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) if 'missing' in raw.get('data', {}): missing.update(raw['data']['missing']) continue if 'return' not in raw['data']: continue if kwargs.get('raw', False): found.add(raw['data']['id']) yield raw else: found.add(raw['data']['id']) ret = {raw['data']['id']: {'ret': raw['data']['return']}} if 'out' in raw['data']: ret[raw['data']['id']]['out'] = raw['data']['out'] if 'retcode' in raw['data']: ret[raw['data']['id']]['retcode'] = raw['data']['retcode'] if 'jid' in raw['data']: ret[raw['data']['id']]['jid'] = raw['data']['jid'] if kwargs.get('_cmd_meta', False): ret[raw['data']['id']].update(raw['data']) log.debug('jid %s return from %s', jid, raw['data']['id']) yield ret # if we have all of the returns (and we aren't a syndic), no need for anything fancy if 
len(found.intersection(minions)) >= len(minions) and not self.opts['order_masters']: # All minions have returned, break out of the loop log.debug('jid %s found all minions %s', jid, found) break elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']: if len(found) >= len(minions) and len(minions) > 0 and time.time() > gather_syndic_wait: # There were some minions to find and we found them # However, this does not imply that *all* masters have yet responded with expected minion lists. # Therefore, continue to wait up to the syndic_wait period (calculated in gather_syndic_wait) to see # if additional lower-level masters deliver their lists of expected # minions. break # If we get here we may not have gathered the minion list yet. Keep waiting # for all lower-level masters to respond with their minion lists # let start the timeouts for all remaining minions for id_ in minions - found: # if we have a new minion in the list, make sure it has a timeout if id_ not in minion_timeouts: minion_timeouts[id_] = time.time() + timeout # if the jinfo has timed out and some minions are still running the job # re-do the ping if time.time() > timeout_at and minions_running: # since this is a new ping, no one has responded yet jinfo = self.gather_job_info(jid, list(minions - found), 'list', **kwargs) minions_running = False # if we weren't assigned any jid that means the master thinks # we have nothing to send if 'jid' not in jinfo: jinfo_iter = [] else: jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid'])) timeout_at = time.time() + gather_job_timeout # if you are a syndic, wait a little longer if self.opts['order_masters']: timeout_at += self.opts.get('syndic_wait', 1) # check for minions that are running the job still for raw in jinfo_iter: # if there are no more events, lets stop waiting for the jinfo if raw is None: break try: if raw['data']['retcode'] > 0: log.error('saltutil returning errors on minion %s', raw['data']['id']) 
minions.remove(raw['data']['id']) break except KeyError as exc: # This is a safe pass. We're just using the try/except to # avoid having to deep-check for keys. missing_key = exc.__str__().strip('\'"') if missing_key == 'retcode': log.debug('retcode missing from client return') else: log.debug( 'Passing on saltutil error. Key \'%s\' missing ' 'from client return. This may be an error in ' 'the client.', missing_key ) # Keep track of the jid events to unsubscribe from later open_jids.add(jinfo['jid']) # TODO: move to a library?? if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue if 'syndic' in raw.get('data', {}): minions.update(raw['syndic']) continue if 'return' not in raw.get('data', {}): continue # if the job isn't running there anymore... don't count if raw['data']['return'] == {}: continue # if the minion throws an exception containing the word "return" # the master will try to handle the string as a dict in the next # step. Check if we have a string, log the issue and continue. 
if isinstance(raw['data']['return'], six.string_types): log.error("unexpected return from minion: %s", raw) continue if 'return' in raw['data']['return'] and \ raw['data']['return']['return'] == {}: continue # if we didn't originally target the minion, lets add it to the list if raw['data']['id'] not in minions: minions.add(raw['data']['id']) # update this minion's timeout, as long as the job is still running minion_timeouts[raw['data']['id']] = time.time() + timeout # a minion returned, so we know its running somewhere minions_running = True # if we have hit gather_job_timeout (after firing the job) AND # if we have hit all minion timeouts, lets call it now = time.time() # if we have finished waiting, and no minions are running the job # then we need to see if each minion has timedout done = (now > timeout_at) and not minions_running if done: # if all minions have timeod out for id_ in minions - found: if now < minion_timeouts[id_]: done = False break if done: break # don't spin if block: time.sleep(0.01) else: yield # If there are any remaining open events, clean them up. if open_jids: for jid in open_jids: self.event.unsubscribe(jid) if expect_minions: for minion in list((minions - found)): yield {minion: {'failed': True}} # Filter out any minions marked as missing for which we received # returns (prevents false events sent due to higher-level masters not # knowing about lower-level minions). 
missing -= found # Report on missing minions if missing: for minion in missing: yield {minion: {'failed': True}} def get_returns( self, jid, minions, timeout=None): ''' Get the returns for the command line interface via the event system ''' minions = set(minions) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout log.debug( 'get_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Master job cache returner [{0}] failed to verify jid. ' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: time_left = timeout_at - int(time.time()) wait = max(1, time_left) raw = self.event.get_event(wait, jid, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: found.add(raw['id']) ret[raw['id']] = raw['return'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break if int(time.time()) > timeout_at: log.info( 'jid %s minions %s did not return in time', jid, (minions - found) ) break time.sleep(0.01) return ret def get_full_returns(self, jid, minions, timeout=None): ''' This method starts off a watcher looking at the return data for a specified jid, it returns all of the information for the jid ''' # TODO: change this from ret to return... or the other way. 
# Its inconsistent, we should pick one ret = {} # create the iterator-- since we want to get anyone in the middle event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Returner {0} could not fetch jid data. ' 'Exception details: {1}'.format( self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # if we have all the minion returns, lets just return if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise lets use the listener we created above to get the rest for event_ret in event_iter: # if nothing in the event_ret, skip if event_ret == {}: time.sleep(0.02) continue for minion, m_data in six.iteritems(event_ret): if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # are we done yet? if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise we hit the timeout, return what we have return ret def get_cache_returns(self, jid): ''' Execute a single pass to gather the contents of the job cache ''' ret = {} try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Could not examine master job cache. ' 'Error occurred in {0} returner. 
' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data return ret def get_cli_static_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_timeout=False, show_jid=False): ''' Get the returns for the command line interface via the event system ''' log.trace('entered - function get_cli_static_event_returns()') minions = set(minions) if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Load could not be retrieved from ' 'returner {0}. 
Exception details: {1}'.format( self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: # Process events until timeout is reached or all minions have returned time_left = timeout_at - int(time.time()) # Wait 0 == forever, use a minimum of 1s wait = max(1, time_left) jid_tag = 'salt/job/{0}'.format(jid) raw = self.event.get_event(wait, jid_tag, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue found.add(raw['id']) ret[raw['id']] = {'ret': raw['return']} ret[raw['id']]['success'] = raw.get('success', False) if 'out' in raw: ret[raw['id']]['out'] = raw['out'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break if int(time.time()) > timeout_at: if verbose or show_timeout: if self.opts.get('minion_data_cache', False) \ or tgt_type in ('glob', 'pcre', 'list'): if len(found) < len(minions): fail = sorted(list(minions.difference(found))) for minion in fail: ret[minion] = { 'out': 'no_return', 'ret': 'Minion did not return' } break time.sleep(0.01) self._clean_up_subscriptions(jid) return ret def get_event_iter_returns(self, jid, minions, timeout=None): ''' Gather the return data from the event system, break hard when timeout is reached. 
''' log.trace('entered - function get_event_iter_returns()') if timeout is None: timeout = self.opts['timeout'] timeout_at = time.time() + timeout found = set() # Check to see if the jid is real, if not return the empty dict if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() # Wait for the hosts to check in while True: raw = self.event.get_event(timeout, auto_reconnect=self.auto_reconnect) if raw is None or time.time() > timeout_at: # Timeout reached break if 'minions' in raw.get('data', {}): continue try: found.add(raw['id']) ret = {raw['id']: {'ret': raw['return']}} except KeyError: # Ignore other erroneous messages continue if 'out' in raw: ret[raw['id']]['out'] = raw['out'] yield ret time.sleep(0.02) def _prep_pub(self, tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs): ''' Set up the payload_kwargs to be sent down to the master ''' if tgt_type == 'nodegroup': if tgt not in self.opts['nodegroups']: conf_file = self.opts.get( 'conf_file', 'the master config file' ) raise SaltInvocationError( 'Node group {0} unavailable in {1}'.format( tgt, conf_file ) ) tgt = salt.utils.minions.nodegroup_comp(tgt, self.opts['nodegroups']) tgt_type = 'compound' # Convert a range expression to a list of nodes and change expression # form to list if tgt_type == 'range' and HAS_RANGE: tgt = self._convert_range_to_list(tgt) tgt_type = 'list' # If an external job cache is specified add it to the ret list if self.opts.get('ext_job_cache'): if ret: ret += ',{0}'.format(self.opts['ext_job_cache']) else: ret = self.opts['ext_job_cache'] # format the payload - make a function that does this in the payload # module # Generate the standard keyword args to feed to format_payload payload_kwargs = {'cmd': 'publish', 'tgt': tgt, 'fun': fun, 'arg': arg, 'key': self.key, 'tgt_type': tgt_type, 'ret': ret, 'jid': jid} # if kwargs are passed, pack them. 
if kwargs: payload_kwargs['kwargs'] = kwargs # If we have a salt user, add it to the payload if self.opts['syndic_master'] and 'user' in kwargs: payload_kwargs['user'] = kwargs['user'] elif self.salt_user: payload_kwargs['user'] = self.salt_user # If we're a syndication master, pass the timeout if self.opts['order_masters']: payload_kwargs['to'] = timeout return payload_kwargs def pub(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, listen=False, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.ReqChannel.factory(self.opts, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. 
# If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError as err: log.error(err) raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: return payload self.key = key payload_kwargs['key'] = self.key payload = channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: return payload # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() return {'jid': payload['load']['jid'], 'minions': payload['load']['minions']} @tornado.gen.coroutine def pub_async(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, io_loop=None, listen=True, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. 
fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.AsyncReqChannel.factory(self.opts, io_loop=io_loop, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. # If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = yield channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError: raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! 
Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: raise tornado.gen.Return(payload) self.key = key payload_kwargs['key'] = self.key payload = yield channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: raise tornado.gen.Return(payload) # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() raise tornado.gen.Return({'jid': payload['load']['jid'], 'minions': payload['load']['minions']}) def __del__(self): # This IS really necessary! # When running tests, if self.events is not destroyed, we leak 2 # threads per test case which uses self.client if hasattr(self, 'event'): # The call below will take care of calling 'self.event.destroy()' del self.event def _clean_up_subscriptions(self, job_id): if self.opts.get('order_masters'): self.event.unsubscribe('syndic/.*/{0}'.format(job_id), 'regex') self.event.unsubscribe('salt/job/{0}'.format(job_id))
saltstack/salt
salt/client/__init__.py
LocalClient.get_event_iter_returns
python
def get_event_iter_returns(self, jid, minions, timeout=None): ''' Gather the return data from the event system, break hard when timeout is reached. ''' log.trace('entered - function get_event_iter_returns()') if timeout is None: timeout = self.opts['timeout'] timeout_at = time.time() + timeout found = set() # Check to see if the jid is real, if not return the empty dict if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() # Wait for the hosts to check in while True: raw = self.event.get_event(timeout, auto_reconnect=self.auto_reconnect) if raw is None or time.time() > timeout_at: # Timeout reached break if 'minions' in raw.get('data', {}): continue try: found.add(raw['id']) ret = {raw['id']: {'ret': raw['return']}} except KeyError: # Ignore other erroneous messages continue if 'out' in raw: ret[raw['id']]['out'] = raw['out'] yield ret time.sleep(0.02)
Gather the return data from the event system, break hard when timeout is reached.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L1596-L1631
null
class LocalClient(object): ''' The interface used by the :command:`salt` CLI tool on the Salt Master ``LocalClient`` is used to send a command to Salt minions to execute :ref:`execution modules <all-salt.modules>` and return the results to the Salt Master. Importing and using ``LocalClient`` must be done on the same machine as the Salt Master and it must be done using the same user that the Salt Master is running as. (Unless :conf_master:`external_auth` is configured and authentication credentials are included in the execution). .. note:: The LocalClient uses a Tornado IOLoop, this can create issues when using the LocalClient inside an existing IOLoop. If creating the LocalClient in partnership with another IOLoop either create the IOLoop before creating the LocalClient, or when creating the IOLoop use ioloop.current() which will return the ioloop created by LocalClient. .. code-block:: python import salt.client local = salt.client.LocalClient() local.cmd('*', 'test.fib', [10]) ''' def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, 'master'), mopts=None, skip_perm_errors=False, io_loop=None, keep_loop=False, auto_reconnect=False): ''' :param IOLoop io_loop: io_loop used for events. Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. 
''' if mopts: self.opts = mopts else: if os.path.isdir(c_path): log.warning( '%s expects a file path not a directory path(%s) to ' 'its \'c_path\' keyword argument', self.__class__.__name__, c_path ) self.opts = salt.config.client_config(c_path) self.serial = salt.payload.Serial(self.opts) self.salt_user = salt.utils.user.get_specific_user() self.skip_perm_errors = skip_perm_errors self.key = self.__read_master_key() self.auto_reconnect = auto_reconnect self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False, io_loop=io_loop, keep_loop=keep_loop) self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils) self.returners = salt.loader.returners(self.opts, self.functions) def __read_master_key(self): ''' Read in the rotating master authentication key ''' key_user = self.salt_user if key_user == 'root': if self.opts.get('user', 'root') != 'root': key_user = self.opts.get('user', 'root') if key_user.startswith('sudo_'): key_user = self.opts.get('user', 'root') if salt.utils.platform.is_windows(): # The username may contain '\' if it is in Windows # 'DOMAIN\username' format. Fix this for the keyfile path. 
key_user = key_user.replace('\\', '_') keyfile = os.path.join(self.opts['cachedir'], '.{0}_key'.format(key_user)) try: # Make sure all key parent directories are accessible salt.utils.verify.check_path_traversal(self.opts['cachedir'], key_user, self.skip_perm_errors) with salt.utils.files.fopen(keyfile, 'r') as key: return salt.utils.stringutils.to_unicode(key.read()) except (OSError, IOError, SaltClientError): # Fall back to eauth return '' def _convert_range_to_list(self, tgt): ''' convert a seco.range range into a list target ''' range_ = seco.range.Range(self.opts['range_server']) try: return range_.expand(tgt) except seco.range.RangeException as err: print('Range server exception: {0}'.format(err)) return [] def _get_timeout(self, timeout): ''' Return the timeout to use ''' if timeout is None: return self.opts['timeout'] if isinstance(timeout, int): return timeout if isinstance(timeout, six.string_types): try: return int(timeout) except ValueError: return self.opts['timeout'] # Looks like the timeout is invalid, use config return self.opts['timeout'] def gather_job_info(self, jid, tgt, tgt_type, listen=True, **kwargs): ''' Return the information about a given job ''' log.debug('Checking whether jid %s is still running', jid) timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) pub_data = self.run_job(tgt, 'saltutil.find_job', arg=[jid], tgt_type=tgt_type, timeout=timeout, listen=listen, **kwargs ) if 'jid' in pub_data: self.event.subscribe(pub_data['jid']) return pub_data def _check_pub_data(self, pub_data, listen=True): ''' Common checks on the pub_data data structure returned from running pub ''' if pub_data == '': # Failed to authenticate, this could be a bunch of things raise EauthAuthenticationError( 'Failed to authenticate! This is most likely because this ' 'user is not permitted to execute commands, but there is a ' 'small possibility that a disk error occurred (check ' 'disk/inode usage).' 
) # Failed to connect to the master and send the pub if 'error' in pub_data: print(pub_data['error']) log.debug('_check_pub_data() error: %s', pub_data['error']) return {} elif 'jid' not in pub_data: return {} if pub_data['jid'] == '0': print('Failed to connect to the Master, ' 'is the Salt Master running?') return {} # If we order masters (via a syndic), don't short circuit if no minions # are found if not self.opts.get('order_masters'): # Check for no minions if not pub_data['minions']: print('No minions matched the target. ' 'No command was sent, no jid was assigned.') return {} # don't install event subscription listeners when the request is asynchronous # and doesn't care. this is important as it will create event leaks otherwise if not listen: return pub_data if self.opts.get('order_masters'): self.event.subscribe('syndic/.*/{0}'.format(pub_data['jid']), 'regex') self.event.subscribe('salt/job/{0}'.format(pub_data['jid'])) return pub_data def run_job( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=False, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = self.pub( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) return self._check_pub_data(pub_data, listen=listen) def gather_minions(self, tgt, expr_form): _res = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form) return _res['minions'] @tornado.gen.coroutine def run_job_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=True, io_loop=None, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job_async('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = yield self.pub_async( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), io_loop=io_loop, listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) raise tornado.gen.Return(self._check_pub_data(pub_data, listen=listen)) def cmd_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', kwarg=None, **kwargs): ''' Asynchronously send a command to connected minions The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A job ID or 0 on failure. .. code-block:: python >>> local.cmd_async('*', 'test.sleep', [300]) '20131219215921857715' ''' pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, jid=jid, kwarg=kwarg, listen=False, **kwargs) try: return pub_data['jid'] except KeyError: return 0 def cmd_subset( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, sub=3, cli=False, progress=False, full_return=False, **kwargs): ''' Execute a command on a random subset of the targeted systems The function signature is the same as :py:meth:`cmd` with the following exceptions. :param sub: The number of systems to execute on :param cli: When this is set to True, a generator is returned, otherwise a dictionary of the minion returns is returned .. 
code-block:: python >>> SLC.cmd_subset('*', 'test.ping', sub=1) {'jerry': True} ''' minion_ret = self.cmd(tgt, 'sys.list_functions', tgt_type=tgt_type, **kwargs) minions = list(minion_ret) random.shuffle(minions) f_tgt = [] for minion in minions: if fun in minion_ret[minion]: f_tgt.append(minion) if len(f_tgt) >= sub: break func = self.cmd if cli: func = self.cmd_cli return func( f_tgt, fun, arg, tgt_type='list', ret=ret, kwarg=kwarg, progress=progress, full_return=full_return, **kwargs) def cmd_batch( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, batch='10%', **kwargs): ''' Iteratively execute a command on subsets of minions at a time The function signature is the same as :py:meth:`cmd` with the following exceptions. :param batch: The batch identifier of systems to execute on :returns: A generator of minion returns .. code-block:: python >>> returns = local.cmd_batch('*', 'state.highstate', batch='10%') >>> for ret in returns: ... print(ret) {'jerry': {...}} {'dave': {...}} {'stewart': {...}} ''' # Late import - not used anywhere else in this file import salt.cli.batch opts = salt.cli.batch.batch_get_opts( tgt, fun, batch, self.opts, arg=arg, tgt_type=tgt_type, ret=ret, kwarg=kwarg, **kwargs) eauth = salt.cli.batch.batch_get_eauth(kwargs) arg = salt.utils.args.parse_input(arg, kwargs=kwarg) opts = {'tgt': tgt, 'fun': fun, 'arg': arg, 'tgt_type': tgt_type, 'ret': ret, 'batch': batch, 'failhard': kwargs.get('failhard', False), 'raw': kwargs.get('raw', False)} if 'timeout' in kwargs: opts['timeout'] = kwargs['timeout'] if 'gather_job_timeout' in kwargs: opts['gather_job_timeout'] = kwargs['gather_job_timeout'] if 'batch_wait' in kwargs: opts['batch_wait'] = int(kwargs['batch_wait']) eauth = {} if 'eauth' in kwargs: eauth['eauth'] = kwargs.pop('eauth') if 'username' in kwargs: eauth['username'] = kwargs.pop('username') if 'password' in kwargs: eauth['password'] = kwargs.pop('password') if 'token' in kwargs: eauth['token'] = kwargs.pop('token') for key, 
val in six.iteritems(self.opts): if key not in opts: opts[key] = val batch = salt.cli.batch.Batch(opts, eauth=eauth, quiet=True) for ret in batch.run(): yield ret def cmd(self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', jid='', full_return=False, kwarg=None, **kwargs): ''' Synchronously execute a command on targeted minions The cmd method will execute and wait for the timeout period for all minions to reply, then it will return all minion data at once. .. code-block:: python >>> import salt.client >>> local = salt.client.LocalClient() >>> local.cmd('*', 'cmd.run', ['whoami']) {'jerry': 'root'} With extra keyword arguments for the command function to be run: .. code-block:: python local.cmd('*', 'test.arg', ['arg1', 'arg2'], kwarg={'foo': 'bar'}) Compound commands can be used for multiple executions in a single publish. Function names and function arguments are provided in separate lists but the index values must correlate and an empty list must be used if no arguments are required. .. code-block:: python >>> local.cmd('*', [ 'grains.items', 'sys.doc', 'cmd.run', ], [ [], [], ['uptime'], ]) :param tgt: Which minions to target for the execution. Default is shell glob. Modified by the ``tgt_type`` option. :type tgt: string or list :param fun: The module and function to call on the specified minions of the form ``module.function``. For example ``test.ping`` or ``grains.items``. Compound commands Multiple functions may be called in a single publish by passing a list of commands. This can dramatically lower overhead and speed up the application communicating with Salt. This requires that the ``arg`` param is a list of lists. The ``fun`` list and the ``arg`` list must correlate by index meaning a function that does not take arguments must still have a corresponding empty list at the expected index. :type fun: string or list of strings :param arg: A list of arguments to pass to the remote function. 
If the function takes no arguments ``arg`` may be omitted except when executing a compound command. :type arg: list or list-of-lists :param timeout: Seconds to wait after the last minion returns but before all minions return. :param tgt_type: The type of ``tgt``. Allowed values: * ``glob`` - Bash glob completion - Default * ``pcre`` - Perl style regular expression * ``list`` - Python list of hosts * ``grain`` - Match based on a grain comparison * ``grain_pcre`` - Grain comparison with a regex * ``pillar`` - Pillar data comparison * ``pillar_pcre`` - Pillar data comparison with a regex * ``nodegroup`` - Match on nodegroup * ``range`` - Use a Range server for matching * ``compound`` - Pass a compound match string * ``ipcidr`` - Match based on Subnet (CIDR notation) or IPv4 address. .. versionchanged:: 2017.7.0 Renamed from ``expr_form`` to ``tgt_type`` :param ret: The returner to use. The value passed can be single returner, or a comma delimited list of returners to call in order on the minions :param kwarg: A dictionary with keyword arguments for the function. :param full_return: Output the job return only (default) or the full return including exit code and other job metadata. :param kwargs: Optional keyword arguments. Authentication credentials may be passed when using :conf_master:`external_auth`. For example: ``local.cmd('*', 'test.ping', username='saltdev', password='saltdev', eauth='pam')``. Or: ``local.cmd('*', 'test.ping', token='5871821ea51754fdcea8153c1c745433')`` :returns: A dictionary with the result of the execution, keyed by minion ID. A compound command will return a sub-dictionary keyed by function name. 
''' was_listening = self.event.cpub try: pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, timeout, jid, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data ret = {} for fn_ret in self.get_cli_event_returns( pub_data['jid'], pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, **kwargs): if fn_ret: for mid, data in six.iteritems(fn_ret): ret[mid] = (data if full_return else data.get('ret', {})) for failed in list(set(pub_data['minions']) - set(ret)): ret[failed] = False return ret finally: if not was_listening: self.event.close_pub() def cmd_cli( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, progress=False, **kwargs): ''' Used by the :command:`salt` CLI. This method returns minion returns as they come back and attempts to block until all minions return. The function signature is the same as :py:meth:`cmd` with the following exceptions. :param verbose: Print extra information about the running command :returns: A generator ''' was_listening = self.event.cpub if fun.startswith('state.'): ref = {'compound': '-C', 'glob': '', 'grain': '-G', 'grain_pcre': '-P', 'ipcidr': '-S', 'list': '-L', 'nodegroup': '-N', 'pcre': '-E', 'pillar': '-I', 'pillar_pcre': '-J'} if HAS_RANGE: ref['range'] = '-R' if ref[tgt_type].startswith('-'): self.target_data = "{0} '{1}'".format( ref[tgt_type], ','.join(tgt) if isinstance(tgt, list) else tgt) else: self.target_data = ','.join(tgt) if isinstance(tgt, list) else tgt else: self.target_data = '' try: self.pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not self.pub_data: yield self.pub_data else: try: for fn_ret in self.get_cli_event_returns( self.pub_data['jid'], self.pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, verbose, progress, **kwargs): if not fn_ret: continue yield fn_ret except KeyboardInterrupt: exit_msg = ( '\nExiting gracefully on Ctrl-c' '\n' 'This job\'s jid is: {0}\n' 'The 
minions may not have all finished running and any ' 'remaining minions will return upon completion.\n\n' 'To look up the return data for this job later, run the ' 'following command:\n' 'salt-run jobs.lookup_jid {0}'.format(self.pub_data['jid'])) if self.target_data: exit_msg += ( '\n\n' 'To set up the state run to safely exit, run the following command:\n' 'salt {0} state.soft_kill {1}'.format(self.target_data, self.pub_data['jid'])) raise SystemExit(exit_msg) finally: if not was_listening: self.event.close_pub() def cmd_iter( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, **kwargs): ''' Yields the individual minion returns as they come in The function signature is the same as :py:meth:`cmd` with the following exceptions. Normally :py:meth:`cmd_iter` does not yield results for minions that are not connected. If you want it to return results for disconnected minions set `expect_minions=True` in `kwargs`. :return: A generator yielding the individual minion returns .. code-block:: python >>> ret = local.cmd_iter('*', 'test.ping') >>> for i in ret: ... print(i) {'jerry': {'ret': True}} {'dave': {'ret': True}} {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: if kwargs.get('yield_pub_data'): yield pub_data for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=self._get_timeout(timeout), tgt=tgt, tgt_type=tgt_type, **kwargs): if not fn_ret: continue yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_iter_no_block( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, show_jid=False, verbose=False, **kwargs): ''' Yields the individual minion returns as they come in, or None when no returns are available. 
The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A generator yielding the individual minion returns, or None when no returns are available. This allows for actions to be injected in between minion returns. .. code-block:: python >>> ret = local.cmd_iter_no_block('*', 'test.ping') >>> for i in ret: ... print(i) None {'jerry': {'ret': True}} {'dave': {'ret': True}} None {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=timeout, tgt=tgt, tgt_type=tgt_type, block=False, **kwargs): if fn_ret and any([show_jid, verbose]): for minion in fn_ret: fn_ret[minion]['jid'] = pub_data['jid'] yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_full_return( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, **kwargs): ''' Execute a salt command and return ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data return (self.get_cli_static_event_returns(pub_data['jid'], pub_data['minions'], timeout, tgt, tgt_type, verbose)) finally: if not was_listening: self.event.close_pub() def get_cli_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_jid=False, **kwargs): ''' Starts a watcher looking at the return data for a specified JID :returns: all of the information for the JID ''' if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] fret = {} # make sure the minions is a set (since we do set operations on it) 
minions = set(minions) found = set() # start this before the cache lookup-- in case new stuff comes in event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) # get the info from the cache ret = self.get_cache_returns(jid) if ret != {}: found.update(set(ret)) yield ret # if you have all the returns, stop if len(found.intersection(minions)) >= len(minions): raise StopIteration() # otherwise, get them from the event system for event in event_iter: if event != {}: found.update(set(event)) yield event if len(found.intersection(minions)) >= len(minions): self._clean_up_subscriptions(jid) raise StopIteration() # TODO: tests!! def get_returns_no_block( self, tag, match_type=None): ''' Raw function to just return events of jid excluding timeout logic Yield either the raw event data or None Pass a list of additional regular expressions as `tags_regex` to search the event bus for non-return data, such as minion lists returned from syndics. ''' while True: raw = self.event.get_event(wait=0.01, tag=tag, match_type=match_type, full=True, no_block=True, auto_reconnect=self.auto_reconnect) yield raw def get_iter_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', expect_minions=False, block=True, **kwargs): ''' Watch the event system and return job data as it comes in :returns: all of the information for the JID ''' if not isinstance(minions, set): if isinstance(minions, six.string_types): minions = set([minions]) elif isinstance(minions, (list, tuple)): minions = set(list(minions)) if timeout is None: timeout = self.opts['timeout'] gather_job_timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) start = int(time.time()) # timeouts per minion, id_ -> timeout time minion_timeouts = {} found = set() missing = set() # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the 
iteration, since the jid is invalid raise StopIteration() except Exception as exc: log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG) # Wait for the hosts to check in last_time = False # iterator for this job's return if self.opts['order_masters']: # If we are a MoM, we need to gather expected minions from downstreams masters. ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex') else: ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid)) # iterator for the info of this job jinfo_iter = [] # open event jids that need to be un-subscribed from later open_jids = set() timeout_at = time.time() + timeout gather_syndic_wait = time.time() + self.opts['syndic_wait'] # are there still minions running the job out there # start as True so that we ping at least once minions_running = True log.debug( 'get_iter_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) while True: # Process events until timeout is reached or all minions have returned for raw in ret_iter: # if we got None, then there were no events if raw is None: break if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) if 'missing' in raw.get('data', {}): missing.update(raw['data']['missing']) continue if 'return' not in raw['data']: continue if kwargs.get('raw', False): found.add(raw['data']['id']) yield raw else: found.add(raw['data']['id']) ret = {raw['data']['id']: {'ret': raw['data']['return']}} if 'out' in raw['data']: ret[raw['data']['id']]['out'] = raw['data']['out'] if 'retcode' in raw['data']: ret[raw['data']['id']]['retcode'] = raw['data']['retcode'] if 'jid' in raw['data']: ret[raw['data']['id']]['jid'] = raw['data']['jid'] if kwargs.get('_cmd_meta', False): ret[raw['data']['id']].update(raw['data']) log.debug('jid %s return from %s', jid, raw['data']['id']) yield ret # if we have all of the returns (and we aren't a syndic), no need for anything fancy if 
len(found.intersection(minions)) >= len(minions) and not self.opts['order_masters']: # All minions have returned, break out of the loop log.debug('jid %s found all minions %s', jid, found) break elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']: if len(found) >= len(minions) and len(minions) > 0 and time.time() > gather_syndic_wait: # There were some minions to find and we found them # However, this does not imply that *all* masters have yet responded with expected minion lists. # Therefore, continue to wait up to the syndic_wait period (calculated in gather_syndic_wait) to see # if additional lower-level masters deliver their lists of expected # minions. break # If we get here we may not have gathered the minion list yet. Keep waiting # for all lower-level masters to respond with their minion lists # let start the timeouts for all remaining minions for id_ in minions - found: # if we have a new minion in the list, make sure it has a timeout if id_ not in minion_timeouts: minion_timeouts[id_] = time.time() + timeout # if the jinfo has timed out and some minions are still running the job # re-do the ping if time.time() > timeout_at and minions_running: # since this is a new ping, no one has responded yet jinfo = self.gather_job_info(jid, list(minions - found), 'list', **kwargs) minions_running = False # if we weren't assigned any jid that means the master thinks # we have nothing to send if 'jid' not in jinfo: jinfo_iter = [] else: jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid'])) timeout_at = time.time() + gather_job_timeout # if you are a syndic, wait a little longer if self.opts['order_masters']: timeout_at += self.opts.get('syndic_wait', 1) # check for minions that are running the job still for raw in jinfo_iter: # if there are no more events, lets stop waiting for the jinfo if raw is None: break try: if raw['data']['retcode'] > 0: log.error('saltutil returning errors on minion %s', raw['data']['id']) 
minions.remove(raw['data']['id']) break except KeyError as exc: # This is a safe pass. We're just using the try/except to # avoid having to deep-check for keys. missing_key = exc.__str__().strip('\'"') if missing_key == 'retcode': log.debug('retcode missing from client return') else: log.debug( 'Passing on saltutil error. Key \'%s\' missing ' 'from client return. This may be an error in ' 'the client.', missing_key ) # Keep track of the jid events to unsubscribe from later open_jids.add(jinfo['jid']) # TODO: move to a library?? if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue if 'syndic' in raw.get('data', {}): minions.update(raw['syndic']) continue if 'return' not in raw.get('data', {}): continue # if the job isn't running there anymore... don't count if raw['data']['return'] == {}: continue # if the minion throws an exception containing the word "return" # the master will try to handle the string as a dict in the next # step. Check if we have a string, log the issue and continue. 
if isinstance(raw['data']['return'], six.string_types): log.error("unexpected return from minion: %s", raw) continue if 'return' in raw['data']['return'] and \ raw['data']['return']['return'] == {}: continue # if we didn't originally target the minion, lets add it to the list if raw['data']['id'] not in minions: minions.add(raw['data']['id']) # update this minion's timeout, as long as the job is still running minion_timeouts[raw['data']['id']] = time.time() + timeout # a minion returned, so we know its running somewhere minions_running = True # if we have hit gather_job_timeout (after firing the job) AND # if we have hit all minion timeouts, lets call it now = time.time() # if we have finished waiting, and no minions are running the job # then we need to see if each minion has timedout done = (now > timeout_at) and not minions_running if done: # if all minions have timeod out for id_ in minions - found: if now < minion_timeouts[id_]: done = False break if done: break # don't spin if block: time.sleep(0.01) else: yield # If there are any remaining open events, clean them up. if open_jids: for jid in open_jids: self.event.unsubscribe(jid) if expect_minions: for minion in list((minions - found)): yield {minion: {'failed': True}} # Filter out any minions marked as missing for which we received # returns (prevents false events sent due to higher-level masters not # knowing about lower-level minions). 
missing -= found # Report on missing minions if missing: for minion in missing: yield {minion: {'failed': True}} def get_returns( self, jid, minions, timeout=None): ''' Get the returns for the command line interface via the event system ''' minions = set(minions) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout log.debug( 'get_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Master job cache returner [{0}] failed to verify jid. ' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: time_left = timeout_at - int(time.time()) wait = max(1, time_left) raw = self.event.get_event(wait, jid, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: found.add(raw['id']) ret[raw['id']] = raw['return'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break if int(time.time()) > timeout_at: log.info( 'jid %s minions %s did not return in time', jid, (minions - found) ) break time.sleep(0.01) return ret def get_full_returns(self, jid, minions, timeout=None): ''' This method starts off a watcher looking at the return data for a specified jid, it returns all of the information for the jid ''' # TODO: change this from ret to return... or the other way. 
# Its inconsistent, we should pick one ret = {} # create the iterator-- since we want to get anyone in the middle event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Returner {0} could not fetch jid data. ' 'Exception details: {1}'.format( self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # if we have all the minion returns, lets just return if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise lets use the listener we created above to get the rest for event_ret in event_iter: # if nothing in the event_ret, skip if event_ret == {}: time.sleep(0.02) continue for minion, m_data in six.iteritems(event_ret): if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # are we done yet? if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise we hit the timeout, return what we have return ret def get_cache_returns(self, jid): ''' Execute a single pass to gather the contents of the job cache ''' ret = {} try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Could not examine master job cache. ' 'Error occurred in {0} returner. 
' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data return ret def get_cli_static_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_timeout=False, show_jid=False): ''' Get the returns for the command line interface via the event system ''' log.trace('entered - function get_cli_static_event_returns()') minions = set(minions) if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Load could not be retrieved from ' 'returner {0}. 
Exception details: {1}'.format( self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: # Process events until timeout is reached or all minions have returned time_left = timeout_at - int(time.time()) # Wait 0 == forever, use a minimum of 1s wait = max(1, time_left) jid_tag = 'salt/job/{0}'.format(jid) raw = self.event.get_event(wait, jid_tag, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue found.add(raw['id']) ret[raw['id']] = {'ret': raw['return']} ret[raw['id']]['success'] = raw.get('success', False) if 'out' in raw: ret[raw['id']]['out'] = raw['out'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break if int(time.time()) > timeout_at: if verbose or show_timeout: if self.opts.get('minion_data_cache', False) \ or tgt_type in ('glob', 'pcre', 'list'): if len(found) < len(minions): fail = sorted(list(minions.difference(found))) for minion in fail: ret[minion] = { 'out': 'no_return', 'ret': 'Minion did not return' } break time.sleep(0.01) self._clean_up_subscriptions(jid) return ret def get_cli_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, progress=False, show_timeout=False, show_jid=False, **kwargs): ''' Get the returns for the command line interface via the event system ''' log.trace('func get_cli_event_returns()') if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) # lazy load the connected minions connected_minions = None return_count = 0 for ret in self.get_iter_returns(jid, minions, timeout=timeout, tgt=tgt, tgt_type=tgt_type, # (gtmanfred) 
expect_minions is popped here incase it is passed from a client # call. If this is not popped, then it would be passed twice to # get_iter_returns. expect_minions=(kwargs.pop('expect_minions', False) or verbose or show_timeout), **kwargs ): log.debug('return event: %s', ret) return_count = return_count + 1 if progress: for id_, min_ret in six.iteritems(ret): if not min_ret.get('failed') is True: yield {'minion_count': len(minions), 'return_count': return_count} # replace the return structure for missing minions for id_, min_ret in six.iteritems(ret): if min_ret.get('failed') is True: if connected_minions is None: connected_minions = salt.utils.minions.CkMinions(self.opts).connected_ids() if self.opts['minion_data_cache'] \ and salt.cache.factory(self.opts).contains('minions/{0}'.format(id_), 'data') \ and connected_minions \ and id_ not in connected_minions: yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [Not connected]', 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: # don't report syndics as unresponsive minions if not os.path.exists(os.path.join(self.opts['syndic_dir'], id_)): yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [No response]' '\nThe minions may not have all finished running and any ' 'remaining minions will return upon completion. 
To look ' 'up the return data for this job later, run the following ' 'command:\n\n' 'salt-run jobs.lookup_jid {0}'.format(jid), 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: yield {id_: min_ret} self._clean_up_subscriptions(jid) def _prep_pub(self, tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs): ''' Set up the payload_kwargs to be sent down to the master ''' if tgt_type == 'nodegroup': if tgt not in self.opts['nodegroups']: conf_file = self.opts.get( 'conf_file', 'the master config file' ) raise SaltInvocationError( 'Node group {0} unavailable in {1}'.format( tgt, conf_file ) ) tgt = salt.utils.minions.nodegroup_comp(tgt, self.opts['nodegroups']) tgt_type = 'compound' # Convert a range expression to a list of nodes and change expression # form to list if tgt_type == 'range' and HAS_RANGE: tgt = self._convert_range_to_list(tgt) tgt_type = 'list' # If an external job cache is specified add it to the ret list if self.opts.get('ext_job_cache'): if ret: ret += ',{0}'.format(self.opts['ext_job_cache']) else: ret = self.opts['ext_job_cache'] # format the payload - make a function that does this in the payload # module # Generate the standard keyword args to feed to format_payload payload_kwargs = {'cmd': 'publish', 'tgt': tgt, 'fun': fun, 'arg': arg, 'key': self.key, 'tgt_type': tgt_type, 'ret': ret, 'jid': jid} # if kwargs are passed, pack them. if kwargs: payload_kwargs['kwargs'] = kwargs # If we have a salt user, add it to the payload if self.opts['syndic_master'] and 'user' in kwargs: payload_kwargs['user'] = kwargs['user'] elif self.salt_user: payload_kwargs['user'] = self.salt_user # If we're a syndication master, pass the timeout if self.opts['order_masters']: payload_kwargs['to'] = timeout return payload_kwargs def pub(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, listen=False, **kwargs): ''' Take the required arguments and publish the given command. 
Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.ReqChannel.factory(self.opts, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. # If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError as err: log.error(err) raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' 
) if not payload: # The master key could have changed out from under us! Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: return payload self.key = key payload_kwargs['key'] = self.key payload = channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: return payload # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() return {'jid': payload['load']['jid'], 'minions': payload['load']['minions']} @tornado.gen.coroutine def pub_async(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, io_loop=None, listen=True, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. 
''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.AsyncReqChannel.factory(self.opts, io_loop=io_loop, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. # If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = yield channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError: raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! 
Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: raise tornado.gen.Return(payload) self.key = key payload_kwargs['key'] = self.key payload = yield channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: raise tornado.gen.Return(payload) # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() raise tornado.gen.Return({'jid': payload['load']['jid'], 'minions': payload['load']['minions']}) def __del__(self): # This IS really necessary! # When running tests, if self.events is not destroyed, we leak 2 # threads per test case which uses self.client if hasattr(self, 'event'): # The call below will take care of calling 'self.event.destroy()' del self.event def _clean_up_subscriptions(self, job_id): if self.opts.get('order_masters'): self.event.unsubscribe('syndic/.*/{0}'.format(job_id), 'regex') self.event.unsubscribe('salt/job/{0}'.format(job_id))
saltstack/salt
salt/client/__init__.py
LocalClient._prep_pub
python
def _prep_pub(self, tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs): ''' Set up the payload_kwargs to be sent down to the master ''' if tgt_type == 'nodegroup': if tgt not in self.opts['nodegroups']: conf_file = self.opts.get( 'conf_file', 'the master config file' ) raise SaltInvocationError( 'Node group {0} unavailable in {1}'.format( tgt, conf_file ) ) tgt = salt.utils.minions.nodegroup_comp(tgt, self.opts['nodegroups']) tgt_type = 'compound' # Convert a range expression to a list of nodes and change expression # form to list if tgt_type == 'range' and HAS_RANGE: tgt = self._convert_range_to_list(tgt) tgt_type = 'list' # If an external job cache is specified add it to the ret list if self.opts.get('ext_job_cache'): if ret: ret += ',{0}'.format(self.opts['ext_job_cache']) else: ret = self.opts['ext_job_cache'] # format the payload - make a function that does this in the payload # module # Generate the standard keyword args to feed to format_payload payload_kwargs = {'cmd': 'publish', 'tgt': tgt, 'fun': fun, 'arg': arg, 'key': self.key, 'tgt_type': tgt_type, 'ret': ret, 'jid': jid} # if kwargs are passed, pack them. if kwargs: payload_kwargs['kwargs'] = kwargs # If we have a salt user, add it to the payload if self.opts['syndic_master'] and 'user' in kwargs: payload_kwargs['user'] = kwargs['user'] elif self.salt_user: payload_kwargs['user'] = self.salt_user # If we're a syndication master, pass the timeout if self.opts['order_masters']: payload_kwargs['to'] = timeout return payload_kwargs
Set up the payload_kwargs to be sent down to the master
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L1633-L1699
null
class LocalClient(object): ''' The interface used by the :command:`salt` CLI tool on the Salt Master ``LocalClient`` is used to send a command to Salt minions to execute :ref:`execution modules <all-salt.modules>` and return the results to the Salt Master. Importing and using ``LocalClient`` must be done on the same machine as the Salt Master and it must be done using the same user that the Salt Master is running as. (Unless :conf_master:`external_auth` is configured and authentication credentials are included in the execution). .. note:: The LocalClient uses a Tornado IOLoop, this can create issues when using the LocalClient inside an existing IOLoop. If creating the LocalClient in partnership with another IOLoop either create the IOLoop before creating the LocalClient, or when creating the IOLoop use ioloop.current() which will return the ioloop created by LocalClient. .. code-block:: python import salt.client local = salt.client.LocalClient() local.cmd('*', 'test.fib', [10]) ''' def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, 'master'), mopts=None, skip_perm_errors=False, io_loop=None, keep_loop=False, auto_reconnect=False): ''' :param IOLoop io_loop: io_loop used for events. Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. 
''' if mopts: self.opts = mopts else: if os.path.isdir(c_path): log.warning( '%s expects a file path not a directory path(%s) to ' 'its \'c_path\' keyword argument', self.__class__.__name__, c_path ) self.opts = salt.config.client_config(c_path) self.serial = salt.payload.Serial(self.opts) self.salt_user = salt.utils.user.get_specific_user() self.skip_perm_errors = skip_perm_errors self.key = self.__read_master_key() self.auto_reconnect = auto_reconnect self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False, io_loop=io_loop, keep_loop=keep_loop) self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils) self.returners = salt.loader.returners(self.opts, self.functions) def __read_master_key(self): ''' Read in the rotating master authentication key ''' key_user = self.salt_user if key_user == 'root': if self.opts.get('user', 'root') != 'root': key_user = self.opts.get('user', 'root') if key_user.startswith('sudo_'): key_user = self.opts.get('user', 'root') if salt.utils.platform.is_windows(): # The username may contain '\' if it is in Windows # 'DOMAIN\username' format. Fix this for the keyfile path. 
key_user = key_user.replace('\\', '_') keyfile = os.path.join(self.opts['cachedir'], '.{0}_key'.format(key_user)) try: # Make sure all key parent directories are accessible salt.utils.verify.check_path_traversal(self.opts['cachedir'], key_user, self.skip_perm_errors) with salt.utils.files.fopen(keyfile, 'r') as key: return salt.utils.stringutils.to_unicode(key.read()) except (OSError, IOError, SaltClientError): # Fall back to eauth return '' def _convert_range_to_list(self, tgt): ''' convert a seco.range range into a list target ''' range_ = seco.range.Range(self.opts['range_server']) try: return range_.expand(tgt) except seco.range.RangeException as err: print('Range server exception: {0}'.format(err)) return [] def _get_timeout(self, timeout): ''' Return the timeout to use ''' if timeout is None: return self.opts['timeout'] if isinstance(timeout, int): return timeout if isinstance(timeout, six.string_types): try: return int(timeout) except ValueError: return self.opts['timeout'] # Looks like the timeout is invalid, use config return self.opts['timeout'] def gather_job_info(self, jid, tgt, tgt_type, listen=True, **kwargs): ''' Return the information about a given job ''' log.debug('Checking whether jid %s is still running', jid) timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) pub_data = self.run_job(tgt, 'saltutil.find_job', arg=[jid], tgt_type=tgt_type, timeout=timeout, listen=listen, **kwargs ) if 'jid' in pub_data: self.event.subscribe(pub_data['jid']) return pub_data def _check_pub_data(self, pub_data, listen=True): ''' Common checks on the pub_data data structure returned from running pub ''' if pub_data == '': # Failed to authenticate, this could be a bunch of things raise EauthAuthenticationError( 'Failed to authenticate! This is most likely because this ' 'user is not permitted to execute commands, but there is a ' 'small possibility that a disk error occurred (check ' 'disk/inode usage).' 
) # Failed to connect to the master and send the pub if 'error' in pub_data: print(pub_data['error']) log.debug('_check_pub_data() error: %s', pub_data['error']) return {} elif 'jid' not in pub_data: return {} if pub_data['jid'] == '0': print('Failed to connect to the Master, ' 'is the Salt Master running?') return {} # If we order masters (via a syndic), don't short circuit if no minions # are found if not self.opts.get('order_masters'): # Check for no minions if not pub_data['minions']: print('No minions matched the target. ' 'No command was sent, no jid was assigned.') return {} # don't install event subscription listeners when the request is asynchronous # and doesn't care. this is important as it will create event leaks otherwise if not listen: return pub_data if self.opts.get('order_masters'): self.event.subscribe('syndic/.*/{0}'.format(pub_data['jid']), 'regex') self.event.subscribe('salt/job/{0}'.format(pub_data['jid'])) return pub_data def run_job( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=False, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = self.pub( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) return self._check_pub_data(pub_data, listen=listen) def gather_minions(self, tgt, expr_form): _res = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form) return _res['minions'] @tornado.gen.coroutine def run_job_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=True, io_loop=None, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job_async('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = yield self.pub_async( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), io_loop=io_loop, listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) raise tornado.gen.Return(self._check_pub_data(pub_data, listen=listen)) def cmd_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', kwarg=None, **kwargs): ''' Asynchronously send a command to connected minions The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A job ID or 0 on failure. .. code-block:: python >>> local.cmd_async('*', 'test.sleep', [300]) '20131219215921857715' ''' pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, jid=jid, kwarg=kwarg, listen=False, **kwargs) try: return pub_data['jid'] except KeyError: return 0 def cmd_subset( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, sub=3, cli=False, progress=False, full_return=False, **kwargs): ''' Execute a command on a random subset of the targeted systems The function signature is the same as :py:meth:`cmd` with the following exceptions. :param sub: The number of systems to execute on :param cli: When this is set to True, a generator is returned, otherwise a dictionary of the minion returns is returned .. 
code-block:: python >>> SLC.cmd_subset('*', 'test.ping', sub=1) {'jerry': True} ''' minion_ret = self.cmd(tgt, 'sys.list_functions', tgt_type=tgt_type, **kwargs) minions = list(minion_ret) random.shuffle(minions) f_tgt = [] for minion in minions: if fun in minion_ret[minion]: f_tgt.append(minion) if len(f_tgt) >= sub: break func = self.cmd if cli: func = self.cmd_cli return func( f_tgt, fun, arg, tgt_type='list', ret=ret, kwarg=kwarg, progress=progress, full_return=full_return, **kwargs) def cmd_batch( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, batch='10%', **kwargs): ''' Iteratively execute a command on subsets of minions at a time The function signature is the same as :py:meth:`cmd` with the following exceptions. :param batch: The batch identifier of systems to execute on :returns: A generator of minion returns .. code-block:: python >>> returns = local.cmd_batch('*', 'state.highstate', batch='10%') >>> for ret in returns: ... print(ret) {'jerry': {...}} {'dave': {...}} {'stewart': {...}} ''' # Late import - not used anywhere else in this file import salt.cli.batch opts = salt.cli.batch.batch_get_opts( tgt, fun, batch, self.opts, arg=arg, tgt_type=tgt_type, ret=ret, kwarg=kwarg, **kwargs) eauth = salt.cli.batch.batch_get_eauth(kwargs) arg = salt.utils.args.parse_input(arg, kwargs=kwarg) opts = {'tgt': tgt, 'fun': fun, 'arg': arg, 'tgt_type': tgt_type, 'ret': ret, 'batch': batch, 'failhard': kwargs.get('failhard', False), 'raw': kwargs.get('raw', False)} if 'timeout' in kwargs: opts['timeout'] = kwargs['timeout'] if 'gather_job_timeout' in kwargs: opts['gather_job_timeout'] = kwargs['gather_job_timeout'] if 'batch_wait' in kwargs: opts['batch_wait'] = int(kwargs['batch_wait']) eauth = {} if 'eauth' in kwargs: eauth['eauth'] = kwargs.pop('eauth') if 'username' in kwargs: eauth['username'] = kwargs.pop('username') if 'password' in kwargs: eauth['password'] = kwargs.pop('password') if 'token' in kwargs: eauth['token'] = kwargs.pop('token') for key, 
val in six.iteritems(self.opts): if key not in opts: opts[key] = val batch = salt.cli.batch.Batch(opts, eauth=eauth, quiet=True) for ret in batch.run(): yield ret def cmd(self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', jid='', full_return=False, kwarg=None, **kwargs): ''' Synchronously execute a command on targeted minions The cmd method will execute and wait for the timeout period for all minions to reply, then it will return all minion data at once. .. code-block:: python >>> import salt.client >>> local = salt.client.LocalClient() >>> local.cmd('*', 'cmd.run', ['whoami']) {'jerry': 'root'} With extra keyword arguments for the command function to be run: .. code-block:: python local.cmd('*', 'test.arg', ['arg1', 'arg2'], kwarg={'foo': 'bar'}) Compound commands can be used for multiple executions in a single publish. Function names and function arguments are provided in separate lists but the index values must correlate and an empty list must be used if no arguments are required. .. code-block:: python >>> local.cmd('*', [ 'grains.items', 'sys.doc', 'cmd.run', ], [ [], [], ['uptime'], ]) :param tgt: Which minions to target for the execution. Default is shell glob. Modified by the ``tgt_type`` option. :type tgt: string or list :param fun: The module and function to call on the specified minions of the form ``module.function``. For example ``test.ping`` or ``grains.items``. Compound commands Multiple functions may be called in a single publish by passing a list of commands. This can dramatically lower overhead and speed up the application communicating with Salt. This requires that the ``arg`` param is a list of lists. The ``fun`` list and the ``arg`` list must correlate by index meaning a function that does not take arguments must still have a corresponding empty list at the expected index. :type fun: string or list of strings :param arg: A list of arguments to pass to the remote function. 
If the function takes no arguments ``arg`` may be omitted except when executing a compound command. :type arg: list or list-of-lists :param timeout: Seconds to wait after the last minion returns but before all minions return. :param tgt_type: The type of ``tgt``. Allowed values: * ``glob`` - Bash glob completion - Default * ``pcre`` - Perl style regular expression * ``list`` - Python list of hosts * ``grain`` - Match based on a grain comparison * ``grain_pcre`` - Grain comparison with a regex * ``pillar`` - Pillar data comparison * ``pillar_pcre`` - Pillar data comparison with a regex * ``nodegroup`` - Match on nodegroup * ``range`` - Use a Range server for matching * ``compound`` - Pass a compound match string * ``ipcidr`` - Match based on Subnet (CIDR notation) or IPv4 address. .. versionchanged:: 2017.7.0 Renamed from ``expr_form`` to ``tgt_type`` :param ret: The returner to use. The value passed can be single returner, or a comma delimited list of returners to call in order on the minions :param kwarg: A dictionary with keyword arguments for the function. :param full_return: Output the job return only (default) or the full return including exit code and other job metadata. :param kwargs: Optional keyword arguments. Authentication credentials may be passed when using :conf_master:`external_auth`. For example: ``local.cmd('*', 'test.ping', username='saltdev', password='saltdev', eauth='pam')``. Or: ``local.cmd('*', 'test.ping', token='5871821ea51754fdcea8153c1c745433')`` :returns: A dictionary with the result of the execution, keyed by minion ID. A compound command will return a sub-dictionary keyed by function name. 
''' was_listening = self.event.cpub try: pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, timeout, jid, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data ret = {} for fn_ret in self.get_cli_event_returns( pub_data['jid'], pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, **kwargs): if fn_ret: for mid, data in six.iteritems(fn_ret): ret[mid] = (data if full_return else data.get('ret', {})) for failed in list(set(pub_data['minions']) - set(ret)): ret[failed] = False return ret finally: if not was_listening: self.event.close_pub() def cmd_cli( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, progress=False, **kwargs): ''' Used by the :command:`salt` CLI. This method returns minion returns as they come back and attempts to block until all minions return. The function signature is the same as :py:meth:`cmd` with the following exceptions. :param verbose: Print extra information about the running command :returns: A generator ''' was_listening = self.event.cpub if fun.startswith('state.'): ref = {'compound': '-C', 'glob': '', 'grain': '-G', 'grain_pcre': '-P', 'ipcidr': '-S', 'list': '-L', 'nodegroup': '-N', 'pcre': '-E', 'pillar': '-I', 'pillar_pcre': '-J'} if HAS_RANGE: ref['range'] = '-R' if ref[tgt_type].startswith('-'): self.target_data = "{0} '{1}'".format( ref[tgt_type], ','.join(tgt) if isinstance(tgt, list) else tgt) else: self.target_data = ','.join(tgt) if isinstance(tgt, list) else tgt else: self.target_data = '' try: self.pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not self.pub_data: yield self.pub_data else: try: for fn_ret in self.get_cli_event_returns( self.pub_data['jid'], self.pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, verbose, progress, **kwargs): if not fn_ret: continue yield fn_ret except KeyboardInterrupt: exit_msg = ( '\nExiting gracefully on Ctrl-c' '\n' 'This job\'s jid is: {0}\n' 'The 
minions may not have all finished running and any ' 'remaining minions will return upon completion.\n\n' 'To look up the return data for this job later, run the ' 'following command:\n' 'salt-run jobs.lookup_jid {0}'.format(self.pub_data['jid'])) if self.target_data: exit_msg += ( '\n\n' 'To set up the state run to safely exit, run the following command:\n' 'salt {0} state.soft_kill {1}'.format(self.target_data, self.pub_data['jid'])) raise SystemExit(exit_msg) finally: if not was_listening: self.event.close_pub() def cmd_iter( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, **kwargs): ''' Yields the individual minion returns as they come in The function signature is the same as :py:meth:`cmd` with the following exceptions. Normally :py:meth:`cmd_iter` does not yield results for minions that are not connected. If you want it to return results for disconnected minions set `expect_minions=True` in `kwargs`. :return: A generator yielding the individual minion returns .. code-block:: python >>> ret = local.cmd_iter('*', 'test.ping') >>> for i in ret: ... print(i) {'jerry': {'ret': True}} {'dave': {'ret': True}} {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: if kwargs.get('yield_pub_data'): yield pub_data for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=self._get_timeout(timeout), tgt=tgt, tgt_type=tgt_type, **kwargs): if not fn_ret: continue yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_iter_no_block( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, show_jid=False, verbose=False, **kwargs): ''' Yields the individual minion returns as they come in, or None when no returns are available. 
The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A generator yielding the individual minion returns, or None when no returns are available. This allows for actions to be injected in between minion returns. .. code-block:: python >>> ret = local.cmd_iter_no_block('*', 'test.ping') >>> for i in ret: ... print(i) None {'jerry': {'ret': True}} {'dave': {'ret': True}} None {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=timeout, tgt=tgt, tgt_type=tgt_type, block=False, **kwargs): if fn_ret and any([show_jid, verbose]): for minion in fn_ret: fn_ret[minion]['jid'] = pub_data['jid'] yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_full_return( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, **kwargs): ''' Execute a salt command and return ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data return (self.get_cli_static_event_returns(pub_data['jid'], pub_data['minions'], timeout, tgt, tgt_type, verbose)) finally: if not was_listening: self.event.close_pub() def get_cli_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_jid=False, **kwargs): ''' Starts a watcher looking at the return data for a specified JID :returns: all of the information for the JID ''' if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] fret = {} # make sure the minions is a set (since we do set operations on it) 
minions = set(minions) found = set() # start this before the cache lookup-- in case new stuff comes in event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) # get the info from the cache ret = self.get_cache_returns(jid) if ret != {}: found.update(set(ret)) yield ret # if you have all the returns, stop if len(found.intersection(minions)) >= len(minions): raise StopIteration() # otherwise, get them from the event system for event in event_iter: if event != {}: found.update(set(event)) yield event if len(found.intersection(minions)) >= len(minions): self._clean_up_subscriptions(jid) raise StopIteration() # TODO: tests!! def get_returns_no_block( self, tag, match_type=None): ''' Raw function to just return events of jid excluding timeout logic Yield either the raw event data or None Pass a list of additional regular expressions as `tags_regex` to search the event bus for non-return data, such as minion lists returned from syndics. ''' while True: raw = self.event.get_event(wait=0.01, tag=tag, match_type=match_type, full=True, no_block=True, auto_reconnect=self.auto_reconnect) yield raw def get_iter_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', expect_minions=False, block=True, **kwargs): ''' Watch the event system and return job data as it comes in :returns: all of the information for the JID ''' if not isinstance(minions, set): if isinstance(minions, six.string_types): minions = set([minions]) elif isinstance(minions, (list, tuple)): minions = set(list(minions)) if timeout is None: timeout = self.opts['timeout'] gather_job_timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) start = int(time.time()) # timeouts per minion, id_ -> timeout time minion_timeouts = {} found = set() missing = set() # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the 
iteration, since the jid is invalid raise StopIteration() except Exception as exc: log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG) # Wait for the hosts to check in last_time = False # iterator for this job's return if self.opts['order_masters']: # If we are a MoM, we need to gather expected minions from downstreams masters. ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex') else: ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid)) # iterator for the info of this job jinfo_iter = [] # open event jids that need to be un-subscribed from later open_jids = set() timeout_at = time.time() + timeout gather_syndic_wait = time.time() + self.opts['syndic_wait'] # are there still minions running the job out there # start as True so that we ping at least once minions_running = True log.debug( 'get_iter_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) while True: # Process events until timeout is reached or all minions have returned for raw in ret_iter: # if we got None, then there were no events if raw is None: break if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) if 'missing' in raw.get('data', {}): missing.update(raw['data']['missing']) continue if 'return' not in raw['data']: continue if kwargs.get('raw', False): found.add(raw['data']['id']) yield raw else: found.add(raw['data']['id']) ret = {raw['data']['id']: {'ret': raw['data']['return']}} if 'out' in raw['data']: ret[raw['data']['id']]['out'] = raw['data']['out'] if 'retcode' in raw['data']: ret[raw['data']['id']]['retcode'] = raw['data']['retcode'] if 'jid' in raw['data']: ret[raw['data']['id']]['jid'] = raw['data']['jid'] if kwargs.get('_cmd_meta', False): ret[raw['data']['id']].update(raw['data']) log.debug('jid %s return from %s', jid, raw['data']['id']) yield ret # if we have all of the returns (and we aren't a syndic), no need for anything fancy if 
len(found.intersection(minions)) >= len(minions) and not self.opts['order_masters']: # All minions have returned, break out of the loop log.debug('jid %s found all minions %s', jid, found) break elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']: if len(found) >= len(minions) and len(minions) > 0 and time.time() > gather_syndic_wait: # There were some minions to find and we found them # However, this does not imply that *all* masters have yet responded with expected minion lists. # Therefore, continue to wait up to the syndic_wait period (calculated in gather_syndic_wait) to see # if additional lower-level masters deliver their lists of expected # minions. break # If we get here we may not have gathered the minion list yet. Keep waiting # for all lower-level masters to respond with their minion lists # let start the timeouts for all remaining minions for id_ in minions - found: # if we have a new minion in the list, make sure it has a timeout if id_ not in minion_timeouts: minion_timeouts[id_] = time.time() + timeout # if the jinfo has timed out and some minions are still running the job # re-do the ping if time.time() > timeout_at and minions_running: # since this is a new ping, no one has responded yet jinfo = self.gather_job_info(jid, list(minions - found), 'list', **kwargs) minions_running = False # if we weren't assigned any jid that means the master thinks # we have nothing to send if 'jid' not in jinfo: jinfo_iter = [] else: jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid'])) timeout_at = time.time() + gather_job_timeout # if you are a syndic, wait a little longer if self.opts['order_masters']: timeout_at += self.opts.get('syndic_wait', 1) # check for minions that are running the job still for raw in jinfo_iter: # if there are no more events, lets stop waiting for the jinfo if raw is None: break try: if raw['data']['retcode'] > 0: log.error('saltutil returning errors on minion %s', raw['data']['id']) 
minions.remove(raw['data']['id']) break except KeyError as exc: # This is a safe pass. We're just using the try/except to # avoid having to deep-check for keys. missing_key = exc.__str__().strip('\'"') if missing_key == 'retcode': log.debug('retcode missing from client return') else: log.debug( 'Passing on saltutil error. Key \'%s\' missing ' 'from client return. This may be an error in ' 'the client.', missing_key ) # Keep track of the jid events to unsubscribe from later open_jids.add(jinfo['jid']) # TODO: move to a library?? if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue if 'syndic' in raw.get('data', {}): minions.update(raw['syndic']) continue if 'return' not in raw.get('data', {}): continue # if the job isn't running there anymore... don't count if raw['data']['return'] == {}: continue # if the minion throws an exception containing the word "return" # the master will try to handle the string as a dict in the next # step. Check if we have a string, log the issue and continue. 
if isinstance(raw['data']['return'], six.string_types): log.error("unexpected return from minion: %s", raw) continue if 'return' in raw['data']['return'] and \ raw['data']['return']['return'] == {}: continue # if we didn't originally target the minion, lets add it to the list if raw['data']['id'] not in minions: minions.add(raw['data']['id']) # update this minion's timeout, as long as the job is still running minion_timeouts[raw['data']['id']] = time.time() + timeout # a minion returned, so we know its running somewhere minions_running = True # if we have hit gather_job_timeout (after firing the job) AND # if we have hit all minion timeouts, lets call it now = time.time() # if we have finished waiting, and no minions are running the job # then we need to see if each minion has timedout done = (now > timeout_at) and not minions_running if done: # if all minions have timeod out for id_ in minions - found: if now < minion_timeouts[id_]: done = False break if done: break # don't spin if block: time.sleep(0.01) else: yield # If there are any remaining open events, clean them up. if open_jids: for jid in open_jids: self.event.unsubscribe(jid) if expect_minions: for minion in list((minions - found)): yield {minion: {'failed': True}} # Filter out any minions marked as missing for which we received # returns (prevents false events sent due to higher-level masters not # knowing about lower-level minions). 
missing -= found # Report on missing minions if missing: for minion in missing: yield {minion: {'failed': True}} def get_returns( self, jid, minions, timeout=None): ''' Get the returns for the command line interface via the event system ''' minions = set(minions) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout log.debug( 'get_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Master job cache returner [{0}] failed to verify jid. ' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: time_left = timeout_at - int(time.time()) wait = max(1, time_left) raw = self.event.get_event(wait, jid, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: found.add(raw['id']) ret[raw['id']] = raw['return'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break if int(time.time()) > timeout_at: log.info( 'jid %s minions %s did not return in time', jid, (minions - found) ) break time.sleep(0.01) return ret def get_full_returns(self, jid, minions, timeout=None): ''' This method starts off a watcher looking at the return data for a specified jid, it returns all of the information for the jid ''' # TODO: change this from ret to return... or the other way. 
# Its inconsistent, we should pick one ret = {} # create the iterator-- since we want to get anyone in the middle event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Returner {0} could not fetch jid data. ' 'Exception details: {1}'.format( self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # if we have all the minion returns, lets just return if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise lets use the listener we created above to get the rest for event_ret in event_iter: # if nothing in the event_ret, skip if event_ret == {}: time.sleep(0.02) continue for minion, m_data in six.iteritems(event_ret): if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # are we done yet? if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise we hit the timeout, return what we have return ret def get_cache_returns(self, jid): ''' Execute a single pass to gather the contents of the job cache ''' ret = {} try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Could not examine master job cache. ' 'Error occurred in {0} returner. 
' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data return ret def get_cli_static_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_timeout=False, show_jid=False): ''' Get the returns for the command line interface via the event system ''' log.trace('entered - function get_cli_static_event_returns()') minions = set(minions) if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Load could not be retrieved from ' 'returner {0}. 
Exception details: {1}'.format( self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: # Process events until timeout is reached or all minions have returned time_left = timeout_at - int(time.time()) # Wait 0 == forever, use a minimum of 1s wait = max(1, time_left) jid_tag = 'salt/job/{0}'.format(jid) raw = self.event.get_event(wait, jid_tag, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue found.add(raw['id']) ret[raw['id']] = {'ret': raw['return']} ret[raw['id']]['success'] = raw.get('success', False) if 'out' in raw: ret[raw['id']]['out'] = raw['out'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break if int(time.time()) > timeout_at: if verbose or show_timeout: if self.opts.get('minion_data_cache', False) \ or tgt_type in ('glob', 'pcre', 'list'): if len(found) < len(minions): fail = sorted(list(minions.difference(found))) for minion in fail: ret[minion] = { 'out': 'no_return', 'ret': 'Minion did not return' } break time.sleep(0.01) self._clean_up_subscriptions(jid) return ret def get_cli_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, progress=False, show_timeout=False, show_jid=False, **kwargs): ''' Get the returns for the command line interface via the event system ''' log.trace('func get_cli_event_returns()') if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) # lazy load the connected minions connected_minions = None return_count = 0 for ret in self.get_iter_returns(jid, minions, timeout=timeout, tgt=tgt, tgt_type=tgt_type, # (gtmanfred) 
expect_minions is popped here incase it is passed from a client # call. If this is not popped, then it would be passed twice to # get_iter_returns. expect_minions=(kwargs.pop('expect_minions', False) or verbose or show_timeout), **kwargs ): log.debug('return event: %s', ret) return_count = return_count + 1 if progress: for id_, min_ret in six.iteritems(ret): if not min_ret.get('failed') is True: yield {'minion_count': len(minions), 'return_count': return_count} # replace the return structure for missing minions for id_, min_ret in six.iteritems(ret): if min_ret.get('failed') is True: if connected_minions is None: connected_minions = salt.utils.minions.CkMinions(self.opts).connected_ids() if self.opts['minion_data_cache'] \ and salt.cache.factory(self.opts).contains('minions/{0}'.format(id_), 'data') \ and connected_minions \ and id_ not in connected_minions: yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [Not connected]', 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: # don't report syndics as unresponsive minions if not os.path.exists(os.path.join(self.opts['syndic_dir'], id_)): yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [No response]' '\nThe minions may not have all finished running and any ' 'remaining minions will return upon completion. To look ' 'up the return data for this job later, run the following ' 'command:\n\n' 'salt-run jobs.lookup_jid {0}'.format(jid), 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: yield {id_: min_ret} self._clean_up_subscriptions(jid) def get_event_iter_returns(self, jid, minions, timeout=None): ''' Gather the return data from the event system, break hard when timeout is reached. 
''' log.trace('entered - function get_event_iter_returns()') if timeout is None: timeout = self.opts['timeout'] timeout_at = time.time() + timeout found = set() # Check to see if the jid is real, if not return the empty dict if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() # Wait for the hosts to check in while True: raw = self.event.get_event(timeout, auto_reconnect=self.auto_reconnect) if raw is None or time.time() > timeout_at: # Timeout reached break if 'minions' in raw.get('data', {}): continue try: found.add(raw['id']) ret = {raw['id']: {'ret': raw['return']}} except KeyError: # Ignore other erroneous messages continue if 'out' in raw: ret[raw['id']]['out'] = raw['out'] yield ret time.sleep(0.02) def pub(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, listen=False, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. 
''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.ReqChannel.factory(self.opts, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. # If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError as err: log.error(err) raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! 
Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: return payload self.key = key payload_kwargs['key'] = self.key payload = channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: return payload # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() return {'jid': payload['load']['jid'], 'minions': payload['load']['minions']} @tornado.gen.coroutine def pub_async(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, io_loop=None, listen=True, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. 
''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.AsyncReqChannel.factory(self.opts, io_loop=io_loop, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. # If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = yield channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError: raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! 
Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: raise tornado.gen.Return(payload) self.key = key payload_kwargs['key'] = self.key payload = yield channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: raise tornado.gen.Return(payload) # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() raise tornado.gen.Return({'jid': payload['load']['jid'], 'minions': payload['load']['minions']}) def __del__(self): # This IS really necessary! # When running tests, if self.events is not destroyed, we leak 2 # threads per test case which uses self.client if hasattr(self, 'event'): # The call below will take care of calling 'self.event.destroy()' del self.event def _clean_up_subscriptions(self, job_id): if self.opts.get('order_masters'): self.event.unsubscribe('syndic/.*/{0}'.format(job_id), 'regex') self.event.unsubscribe('salt/job/{0}'.format(job_id))
saltstack/salt
salt/client/__init__.py
LocalClient.pub_async
python
def pub_async(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, io_loop=None, listen=True, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.AsyncReqChannel.factory(self.opts, io_loop=io_loop, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. # If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = yield channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError: raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. 
With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: raise tornado.gen.Return(payload) self.key = key payload_kwargs['key'] = self.key payload = yield channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: raise tornado.gen.Return(payload) # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() raise tornado.gen.Return({'jid': payload['load']['jid'], 'minions': payload['load']['minions']})
Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L1808-L1913
[ "def ip_bracket(addr):\n '''\n Convert IP address representation to ZMQ (URL) format. ZMQ expects\n brackets around IPv6 literals, since they are used in URLs.\n '''\n addr = ipaddress.ip_address(addr)\n return ('[{}]' if addr.version == 6 else '{}').format(addr)\n", "def factory(cls, opts, **kwargs):\n # Default to ZeroMQ for now\n ttype = 'zeromq'\n\n # determine the ttype\n if 'transport' in opts:\n ttype = opts['transport']\n elif 'transport' in opts.get('pillar', {}).get('master', {}):\n ttype = opts['pillar']['master']['transport']\n\n # switch on available ttypes\n if ttype == 'zeromq':\n import salt.transport.zeromq\n return salt.transport.zeromq.AsyncZeroMQReqChannel(opts, **kwargs)\n elif ttype == 'tcp':\n if not cls._resolver_configured:\n # TODO: add opt to specify number of resolver threads\n AsyncChannel._config_resolver()\n import salt.transport.tcp\n return salt.transport.tcp.AsyncTCPReqChannel(opts, **kwargs)\n elif ttype == 'local':\n import salt.transport.local\n return salt.transport.local.AsyncLocalChannel(opts, **kwargs)\n else:\n raise Exception(\n 'Channels are only defined for tcp, zeromq, and local'\n )\n" ]
class LocalClient(object): ''' The interface used by the :command:`salt` CLI tool on the Salt Master ``LocalClient`` is used to send a command to Salt minions to execute :ref:`execution modules <all-salt.modules>` and return the results to the Salt Master. Importing and using ``LocalClient`` must be done on the same machine as the Salt Master and it must be done using the same user that the Salt Master is running as. (Unless :conf_master:`external_auth` is configured and authentication credentials are included in the execution). .. note:: The LocalClient uses a Tornado IOLoop, this can create issues when using the LocalClient inside an existing IOLoop. If creating the LocalClient in partnership with another IOLoop either create the IOLoop before creating the LocalClient, or when creating the IOLoop use ioloop.current() which will return the ioloop created by LocalClient. .. code-block:: python import salt.client local = salt.client.LocalClient() local.cmd('*', 'test.fib', [10]) ''' def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, 'master'), mopts=None, skip_perm_errors=False, io_loop=None, keep_loop=False, auto_reconnect=False): ''' :param IOLoop io_loop: io_loop used for events. Pass in an io_loop if you want asynchronous operation for obtaining events. Eg use of set_event_handler() API. Otherwise, operation will be synchronous. 
''' if mopts: self.opts = mopts else: if os.path.isdir(c_path): log.warning( '%s expects a file path not a directory path(%s) to ' 'its \'c_path\' keyword argument', self.__class__.__name__, c_path ) self.opts = salt.config.client_config(c_path) self.serial = salt.payload.Serial(self.opts) self.salt_user = salt.utils.user.get_specific_user() self.skip_perm_errors = skip_perm_errors self.key = self.__read_master_key() self.auto_reconnect = auto_reconnect self.event = salt.utils.event.get_event( 'master', self.opts['sock_dir'], self.opts['transport'], opts=self.opts, listen=False, io_loop=io_loop, keep_loop=keep_loop) self.utils = salt.loader.utils(self.opts) self.functions = salt.loader.minion_mods(self.opts, utils=self.utils) self.returners = salt.loader.returners(self.opts, self.functions) def __read_master_key(self): ''' Read in the rotating master authentication key ''' key_user = self.salt_user if key_user == 'root': if self.opts.get('user', 'root') != 'root': key_user = self.opts.get('user', 'root') if key_user.startswith('sudo_'): key_user = self.opts.get('user', 'root') if salt.utils.platform.is_windows(): # The username may contain '\' if it is in Windows # 'DOMAIN\username' format. Fix this for the keyfile path. 
key_user = key_user.replace('\\', '_') keyfile = os.path.join(self.opts['cachedir'], '.{0}_key'.format(key_user)) try: # Make sure all key parent directories are accessible salt.utils.verify.check_path_traversal(self.opts['cachedir'], key_user, self.skip_perm_errors) with salt.utils.files.fopen(keyfile, 'r') as key: return salt.utils.stringutils.to_unicode(key.read()) except (OSError, IOError, SaltClientError): # Fall back to eauth return '' def _convert_range_to_list(self, tgt): ''' convert a seco.range range into a list target ''' range_ = seco.range.Range(self.opts['range_server']) try: return range_.expand(tgt) except seco.range.RangeException as err: print('Range server exception: {0}'.format(err)) return [] def _get_timeout(self, timeout): ''' Return the timeout to use ''' if timeout is None: return self.opts['timeout'] if isinstance(timeout, int): return timeout if isinstance(timeout, six.string_types): try: return int(timeout) except ValueError: return self.opts['timeout'] # Looks like the timeout is invalid, use config return self.opts['timeout'] def gather_job_info(self, jid, tgt, tgt_type, listen=True, **kwargs): ''' Return the information about a given job ''' log.debug('Checking whether jid %s is still running', jid) timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) pub_data = self.run_job(tgt, 'saltutil.find_job', arg=[jid], tgt_type=tgt_type, timeout=timeout, listen=listen, **kwargs ) if 'jid' in pub_data: self.event.subscribe(pub_data['jid']) return pub_data def _check_pub_data(self, pub_data, listen=True): ''' Common checks on the pub_data data structure returned from running pub ''' if pub_data == '': # Failed to authenticate, this could be a bunch of things raise EauthAuthenticationError( 'Failed to authenticate! This is most likely because this ' 'user is not permitted to execute commands, but there is a ' 'small possibility that a disk error occurred (check ' 'disk/inode usage).' 
) # Failed to connect to the master and send the pub if 'error' in pub_data: print(pub_data['error']) log.debug('_check_pub_data() error: %s', pub_data['error']) return {} elif 'jid' not in pub_data: return {} if pub_data['jid'] == '0': print('Failed to connect to the Master, ' 'is the Salt Master running?') return {} # If we order masters (via a syndic), don't short circuit if no minions # are found if not self.opts.get('order_masters'): # Check for no minions if not pub_data['minions']: print('No minions matched the target. ' 'No command was sent, no jid was assigned.') return {} # don't install event subscription listeners when the request is asynchronous # and doesn't care. this is important as it will create event leaks otherwise if not listen: return pub_data if self.opts.get('order_masters'): self.event.subscribe('syndic/.*/{0}'.format(pub_data['jid']), 'regex') self.event.subscribe('salt/job/{0}'.format(pub_data['jid'])) return pub_data def run_job( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=False, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = self.pub( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) return self._check_pub_data(pub_data, listen=listen) def gather_minions(self, tgt, expr_form): _res = salt.utils.minions.CkMinions(self.opts).check_minions(tgt, tgt_type=expr_form) return _res['minions'] @tornado.gen.coroutine def run_job_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', timeout=None, jid='', kwarg=None, listen=True, io_loop=None, **kwargs): ''' Asynchronously send a command to connected minions Prep the job directory and publish a command to any targeted minions. :return: A dictionary of (validated) ``pub_data`` or an empty dictionary on failure. The ``pub_data`` contains the job ID and a list of all minions that are expected to return data. .. code-block:: python >>> local.run_job_async('*', 'test.sleep', [300]) {'jid': '20131219215650131543', 'minions': ['jerry']} ''' arg = salt.utils.args.parse_input(arg, kwargs=kwarg) try: pub_data = yield self.pub_async( tgt, fun, arg, tgt_type, ret, jid=jid, timeout=self._get_timeout(timeout), io_loop=io_loop, listen=listen, **kwargs) except SaltClientError: # Re-raise error with specific message raise SaltClientError( 'The salt master could not be contacted. Is master running?' 
) except AuthenticationError as err: raise AuthenticationError(err) except AuthorizationError as err: raise AuthorizationError(err) except Exception as general_exception: # Convert to generic client error and pass along message raise SaltClientError(general_exception) raise tornado.gen.Return(self._check_pub_data(pub_data, listen=listen)) def cmd_async( self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', kwarg=None, **kwargs): ''' Asynchronously send a command to connected minions The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A job ID or 0 on failure. .. code-block:: python >>> local.cmd_async('*', 'test.sleep', [300]) '20131219215921857715' ''' pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, jid=jid, kwarg=kwarg, listen=False, **kwargs) try: return pub_data['jid'] except KeyError: return 0 def cmd_subset( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, sub=3, cli=False, progress=False, full_return=False, **kwargs): ''' Execute a command on a random subset of the targeted systems The function signature is the same as :py:meth:`cmd` with the following exceptions. :param sub: The number of systems to execute on :param cli: When this is set to True, a generator is returned, otherwise a dictionary of the minion returns is returned .. 
code-block:: python >>> SLC.cmd_subset('*', 'test.ping', sub=1) {'jerry': True} ''' minion_ret = self.cmd(tgt, 'sys.list_functions', tgt_type=tgt_type, **kwargs) minions = list(minion_ret) random.shuffle(minions) f_tgt = [] for minion in minions: if fun in minion_ret[minion]: f_tgt.append(minion) if len(f_tgt) >= sub: break func = self.cmd if cli: func = self.cmd_cli return func( f_tgt, fun, arg, tgt_type='list', ret=ret, kwarg=kwarg, progress=progress, full_return=full_return, **kwargs) def cmd_batch( self, tgt, fun, arg=(), tgt_type='glob', ret='', kwarg=None, batch='10%', **kwargs): ''' Iteratively execute a command on subsets of minions at a time The function signature is the same as :py:meth:`cmd` with the following exceptions. :param batch: The batch identifier of systems to execute on :returns: A generator of minion returns .. code-block:: python >>> returns = local.cmd_batch('*', 'state.highstate', batch='10%') >>> for ret in returns: ... print(ret) {'jerry': {...}} {'dave': {...}} {'stewart': {...}} ''' # Late import - not used anywhere else in this file import salt.cli.batch opts = salt.cli.batch.batch_get_opts( tgt, fun, batch, self.opts, arg=arg, tgt_type=tgt_type, ret=ret, kwarg=kwarg, **kwargs) eauth = salt.cli.batch.batch_get_eauth(kwargs) arg = salt.utils.args.parse_input(arg, kwargs=kwarg) opts = {'tgt': tgt, 'fun': fun, 'arg': arg, 'tgt_type': tgt_type, 'ret': ret, 'batch': batch, 'failhard': kwargs.get('failhard', False), 'raw': kwargs.get('raw', False)} if 'timeout' in kwargs: opts['timeout'] = kwargs['timeout'] if 'gather_job_timeout' in kwargs: opts['gather_job_timeout'] = kwargs['gather_job_timeout'] if 'batch_wait' in kwargs: opts['batch_wait'] = int(kwargs['batch_wait']) eauth = {} if 'eauth' in kwargs: eauth['eauth'] = kwargs.pop('eauth') if 'username' in kwargs: eauth['username'] = kwargs.pop('username') if 'password' in kwargs: eauth['password'] = kwargs.pop('password') if 'token' in kwargs: eauth['token'] = kwargs.pop('token') for key, 
val in six.iteritems(self.opts): if key not in opts: opts[key] = val batch = salt.cli.batch.Batch(opts, eauth=eauth, quiet=True) for ret in batch.run(): yield ret def cmd(self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', jid='', full_return=False, kwarg=None, **kwargs): ''' Synchronously execute a command on targeted minions The cmd method will execute and wait for the timeout period for all minions to reply, then it will return all minion data at once. .. code-block:: python >>> import salt.client >>> local = salt.client.LocalClient() >>> local.cmd('*', 'cmd.run', ['whoami']) {'jerry': 'root'} With extra keyword arguments for the command function to be run: .. code-block:: python local.cmd('*', 'test.arg', ['arg1', 'arg2'], kwarg={'foo': 'bar'}) Compound commands can be used for multiple executions in a single publish. Function names and function arguments are provided in separate lists but the index values must correlate and an empty list must be used if no arguments are required. .. code-block:: python >>> local.cmd('*', [ 'grains.items', 'sys.doc', 'cmd.run', ], [ [], [], ['uptime'], ]) :param tgt: Which minions to target for the execution. Default is shell glob. Modified by the ``tgt_type`` option. :type tgt: string or list :param fun: The module and function to call on the specified minions of the form ``module.function``. For example ``test.ping`` or ``grains.items``. Compound commands Multiple functions may be called in a single publish by passing a list of commands. This can dramatically lower overhead and speed up the application communicating with Salt. This requires that the ``arg`` param is a list of lists. The ``fun`` list and the ``arg`` list must correlate by index meaning a function that does not take arguments must still have a corresponding empty list at the expected index. :type fun: string or list of strings :param arg: A list of arguments to pass to the remote function. 
If the function takes no arguments ``arg`` may be omitted except when executing a compound command. :type arg: list or list-of-lists :param timeout: Seconds to wait after the last minion returns but before all minions return. :param tgt_type: The type of ``tgt``. Allowed values: * ``glob`` - Bash glob completion - Default * ``pcre`` - Perl style regular expression * ``list`` - Python list of hosts * ``grain`` - Match based on a grain comparison * ``grain_pcre`` - Grain comparison with a regex * ``pillar`` - Pillar data comparison * ``pillar_pcre`` - Pillar data comparison with a regex * ``nodegroup`` - Match on nodegroup * ``range`` - Use a Range server for matching * ``compound`` - Pass a compound match string * ``ipcidr`` - Match based on Subnet (CIDR notation) or IPv4 address. .. versionchanged:: 2017.7.0 Renamed from ``expr_form`` to ``tgt_type`` :param ret: The returner to use. The value passed can be single returner, or a comma delimited list of returners to call in order on the minions :param kwarg: A dictionary with keyword arguments for the function. :param full_return: Output the job return only (default) or the full return including exit code and other job metadata. :param kwargs: Optional keyword arguments. Authentication credentials may be passed when using :conf_master:`external_auth`. For example: ``local.cmd('*', 'test.ping', username='saltdev', password='saltdev', eauth='pam')``. Or: ``local.cmd('*', 'test.ping', token='5871821ea51754fdcea8153c1c745433')`` :returns: A dictionary with the result of the execution, keyed by minion ID. A compound command will return a sub-dictionary keyed by function name. 
''' was_listening = self.event.cpub try: pub_data = self.run_job(tgt, fun, arg, tgt_type, ret, timeout, jid, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data ret = {} for fn_ret in self.get_cli_event_returns( pub_data['jid'], pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, **kwargs): if fn_ret: for mid, data in six.iteritems(fn_ret): ret[mid] = (data if full_return else data.get('ret', {})) for failed in list(set(pub_data['minions']) - set(ret)): ret[failed] = False return ret finally: if not was_listening: self.event.close_pub() def cmd_cli( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, progress=False, **kwargs): ''' Used by the :command:`salt` CLI. This method returns minion returns as they come back and attempts to block until all minions return. The function signature is the same as :py:meth:`cmd` with the following exceptions. :param verbose: Print extra information about the running command :returns: A generator ''' was_listening = self.event.cpub if fun.startswith('state.'): ref = {'compound': '-C', 'glob': '', 'grain': '-G', 'grain_pcre': '-P', 'ipcidr': '-S', 'list': '-L', 'nodegroup': '-N', 'pcre': '-E', 'pillar': '-I', 'pillar_pcre': '-J'} if HAS_RANGE: ref['range'] = '-R' if ref[tgt_type].startswith('-'): self.target_data = "{0} '{1}'".format( ref[tgt_type], ','.join(tgt) if isinstance(tgt, list) else tgt) else: self.target_data = ','.join(tgt) if isinstance(tgt, list) else tgt else: self.target_data = '' try: self.pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not self.pub_data: yield self.pub_data else: try: for fn_ret in self.get_cli_event_returns( self.pub_data['jid'], self.pub_data['minions'], self._get_timeout(timeout), tgt, tgt_type, verbose, progress, **kwargs): if not fn_ret: continue yield fn_ret except KeyboardInterrupt: exit_msg = ( '\nExiting gracefully on Ctrl-c' '\n' 'This job\'s jid is: {0}\n' 'The 
minions may not have all finished running and any ' 'remaining minions will return upon completion.\n\n' 'To look up the return data for this job later, run the ' 'following command:\n' 'salt-run jobs.lookup_jid {0}'.format(self.pub_data['jid'])) if self.target_data: exit_msg += ( '\n\n' 'To set up the state run to safely exit, run the following command:\n' 'salt {0} state.soft_kill {1}'.format(self.target_data, self.pub_data['jid'])) raise SystemExit(exit_msg) finally: if not was_listening: self.event.close_pub() def cmd_iter( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, **kwargs): ''' Yields the individual minion returns as they come in The function signature is the same as :py:meth:`cmd` with the following exceptions. Normally :py:meth:`cmd_iter` does not yield results for minions that are not connected. If you want it to return results for disconnected minions set `expect_minions=True` in `kwargs`. :return: A generator yielding the individual minion returns .. code-block:: python >>> ret = local.cmd_iter('*', 'test.ping') >>> for i in ret: ... print(i) {'jerry': {'ret': True}} {'dave': {'ret': True}} {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: if kwargs.get('yield_pub_data'): yield pub_data for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=self._get_timeout(timeout), tgt=tgt, tgt_type=tgt_type, **kwargs): if not fn_ret: continue yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_iter_no_block( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', kwarg=None, show_jid=False, verbose=False, **kwargs): ''' Yields the individual minion returns as they come in, or None when no returns are available. 
The function signature is the same as :py:meth:`cmd` with the following exceptions. :returns: A generator yielding the individual minion returns, or None when no returns are available. This allows for actions to be injected in between minion returns. .. code-block:: python >>> ret = local.cmd_iter_no_block('*', 'test.ping') >>> for i in ret: ... print(i) None {'jerry': {'ret': True}} {'dave': {'ret': True}} None {'stewart': {'ret': True}} ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: yield pub_data else: for fn_ret in self.get_iter_returns(pub_data['jid'], pub_data['minions'], timeout=timeout, tgt=tgt, tgt_type=tgt_type, block=False, **kwargs): if fn_ret and any([show_jid, verbose]): for minion in fn_ret: fn_ret[minion]['jid'] = pub_data['jid'] yield fn_ret self._clean_up_subscriptions(pub_data['jid']) finally: if not was_listening: self.event.close_pub() def cmd_full_return( self, tgt, fun, arg=(), timeout=None, tgt_type='glob', ret='', verbose=False, kwarg=None, **kwargs): ''' Execute a salt command and return ''' was_listening = self.event.cpub try: pub_data = self.run_job( tgt, fun, arg, tgt_type, ret, timeout, kwarg=kwarg, listen=True, **kwargs) if not pub_data: return pub_data return (self.get_cli_static_event_returns(pub_data['jid'], pub_data['minions'], timeout, tgt, tgt_type, verbose)) finally: if not was_listening: self.event.close_pub() def get_cli_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_jid=False, **kwargs): ''' Starts a watcher looking at the return data for a specified JID :returns: all of the information for the JID ''' if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] fret = {} # make sure the minions is a set (since we do set operations on it) 
minions = set(minions) found = set() # start this before the cache lookup-- in case new stuff comes in event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) # get the info from the cache ret = self.get_cache_returns(jid) if ret != {}: found.update(set(ret)) yield ret # if you have all the returns, stop if len(found.intersection(minions)) >= len(minions): raise StopIteration() # otherwise, get them from the event system for event in event_iter: if event != {}: found.update(set(event)) yield event if len(found.intersection(minions)) >= len(minions): self._clean_up_subscriptions(jid) raise StopIteration() # TODO: tests!! def get_returns_no_block( self, tag, match_type=None): ''' Raw function to just return events of jid excluding timeout logic Yield either the raw event data or None Pass a list of additional regular expressions as `tags_regex` to search the event bus for non-return data, such as minion lists returned from syndics. ''' while True: raw = self.event.get_event(wait=0.01, tag=tag, match_type=match_type, full=True, no_block=True, auto_reconnect=self.auto_reconnect) yield raw def get_iter_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', expect_minions=False, block=True, **kwargs): ''' Watch the event system and return job data as it comes in :returns: all of the information for the JID ''' if not isinstance(minions, set): if isinstance(minions, six.string_types): minions = set([minions]) elif isinstance(minions, (list, tuple)): minions = set(list(minions)) if timeout is None: timeout = self.opts['timeout'] gather_job_timeout = int(kwargs.get('gather_job_timeout', self.opts['gather_job_timeout'])) start = int(time.time()) # timeouts per minion, id_ -> timeout time minion_timeouts = {} found = set() missing = set() # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the 
iteration, since the jid is invalid raise StopIteration() except Exception as exc: log.warning('Returner unavailable: %s', exc, exc_info_on_loglevel=logging.DEBUG) # Wait for the hosts to check in last_time = False # iterator for this job's return if self.opts['order_masters']: # If we are a MoM, we need to gather expected minions from downstreams masters. ret_iter = self.get_returns_no_block('(salt/job|syndic/.*)/{0}'.format(jid), 'regex') else: ret_iter = self.get_returns_no_block('salt/job/{0}'.format(jid)) # iterator for the info of this job jinfo_iter = [] # open event jids that need to be un-subscribed from later open_jids = set() timeout_at = time.time() + timeout gather_syndic_wait = time.time() + self.opts['syndic_wait'] # are there still minions running the job out there # start as True so that we ping at least once minions_running = True log.debug( 'get_iter_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) while True: # Process events until timeout is reached or all minions have returned for raw in ret_iter: # if we got None, then there were no events if raw is None: break if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) if 'missing' in raw.get('data', {}): missing.update(raw['data']['missing']) continue if 'return' not in raw['data']: continue if kwargs.get('raw', False): found.add(raw['data']['id']) yield raw else: found.add(raw['data']['id']) ret = {raw['data']['id']: {'ret': raw['data']['return']}} if 'out' in raw['data']: ret[raw['data']['id']]['out'] = raw['data']['out'] if 'retcode' in raw['data']: ret[raw['data']['id']]['retcode'] = raw['data']['retcode'] if 'jid' in raw['data']: ret[raw['data']['id']]['jid'] = raw['data']['jid'] if kwargs.get('_cmd_meta', False): ret[raw['data']['id']].update(raw['data']) log.debug('jid %s return from %s', jid, raw['data']['id']) yield ret # if we have all of the returns (and we aren't a syndic), no need for anything fancy if 
len(found.intersection(minions)) >= len(minions) and not self.opts['order_masters']: # All minions have returned, break out of the loop log.debug('jid %s found all minions %s', jid, found) break elif len(found.intersection(minions)) >= len(minions) and self.opts['order_masters']: if len(found) >= len(minions) and len(minions) > 0 and time.time() > gather_syndic_wait: # There were some minions to find and we found them # However, this does not imply that *all* masters have yet responded with expected minion lists. # Therefore, continue to wait up to the syndic_wait period (calculated in gather_syndic_wait) to see # if additional lower-level masters deliver their lists of expected # minions. break # If we get here we may not have gathered the minion list yet. Keep waiting # for all lower-level masters to respond with their minion lists # let start the timeouts for all remaining minions for id_ in minions - found: # if we have a new minion in the list, make sure it has a timeout if id_ not in minion_timeouts: minion_timeouts[id_] = time.time() + timeout # if the jinfo has timed out and some minions are still running the job # re-do the ping if time.time() > timeout_at and minions_running: # since this is a new ping, no one has responded yet jinfo = self.gather_job_info(jid, list(minions - found), 'list', **kwargs) minions_running = False # if we weren't assigned any jid that means the master thinks # we have nothing to send if 'jid' not in jinfo: jinfo_iter = [] else: jinfo_iter = self.get_returns_no_block('salt/job/{0}'.format(jinfo['jid'])) timeout_at = time.time() + gather_job_timeout # if you are a syndic, wait a little longer if self.opts['order_masters']: timeout_at += self.opts.get('syndic_wait', 1) # check for minions that are running the job still for raw in jinfo_iter: # if there are no more events, lets stop waiting for the jinfo if raw is None: break try: if raw['data']['retcode'] > 0: log.error('saltutil returning errors on minion %s', raw['data']['id']) 
minions.remove(raw['data']['id']) break except KeyError as exc: # This is a safe pass. We're just using the try/except to # avoid having to deep-check for keys. missing_key = exc.__str__().strip('\'"') if missing_key == 'retcode': log.debug('retcode missing from client return') else: log.debug( 'Passing on saltutil error. Key \'%s\' missing ' 'from client return. This may be an error in ' 'the client.', missing_key ) # Keep track of the jid events to unsubscribe from later open_jids.add(jinfo['jid']) # TODO: move to a library?? if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue if 'syndic' in raw.get('data', {}): minions.update(raw['syndic']) continue if 'return' not in raw.get('data', {}): continue # if the job isn't running there anymore... don't count if raw['data']['return'] == {}: continue # if the minion throws an exception containing the word "return" # the master will try to handle the string as a dict in the next # step. Check if we have a string, log the issue and continue. 
if isinstance(raw['data']['return'], six.string_types): log.error("unexpected return from minion: %s", raw) continue if 'return' in raw['data']['return'] and \ raw['data']['return']['return'] == {}: continue # if we didn't originally target the minion, lets add it to the list if raw['data']['id'] not in minions: minions.add(raw['data']['id']) # update this minion's timeout, as long as the job is still running minion_timeouts[raw['data']['id']] = time.time() + timeout # a minion returned, so we know its running somewhere minions_running = True # if we have hit gather_job_timeout (after firing the job) AND # if we have hit all minion timeouts, lets call it now = time.time() # if we have finished waiting, and no minions are running the job # then we need to see if each minion has timedout done = (now > timeout_at) and not minions_running if done: # if all minions have timeod out for id_ in minions - found: if now < minion_timeouts[id_]: done = False break if done: break # don't spin if block: time.sleep(0.01) else: yield # If there are any remaining open events, clean them up. if open_jids: for jid in open_jids: self.event.unsubscribe(jid) if expect_minions: for minion in list((minions - found)): yield {minion: {'failed': True}} # Filter out any minions marked as missing for which we received # returns (prevents false events sent due to higher-level masters not # knowing about lower-level minions). 
missing -= found # Report on missing minions if missing: for minion in missing: yield {minion: {'failed': True}} def get_returns( self, jid, minions, timeout=None): ''' Get the returns for the command line interface via the event system ''' minions = set(minions) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout log.debug( 'get_returns for jid %s sent to %s will timeout at %s', jid, minions, datetime.fromtimestamp(timeout_at).time() ) found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Master job cache returner [{0}] failed to verify jid. ' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: time_left = timeout_at - int(time.time()) wait = max(1, time_left) raw = self.event.get_event(wait, jid, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: found.add(raw['id']) ret[raw['id']] = raw['return'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop log.debug('jid %s found all minions', jid) break if int(time.time()) > timeout_at: log.info( 'jid %s minions %s did not return in time', jid, (minions - found) ) break time.sleep(0.01) return ret def get_full_returns(self, jid, minions, timeout=None): ''' This method starts off a watcher looking at the return data for a specified jid, it returns all of the information for the jid ''' # TODO: change this from ret to return... or the other way. 
# Its inconsistent, we should pick one ret = {} # create the iterator-- since we want to get anyone in the middle event_iter = self.get_event_iter_returns(jid, minions, timeout=timeout) try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Returner {0} could not fetch jid data. ' 'Exception details: {1}'.format( self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # if we have all the minion returns, lets just return if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise lets use the listener we created above to get the rest for event_ret in event_iter: # if nothing in the event_ret, skip if event_ret == {}: time.sleep(0.02) continue for minion, m_data in six.iteritems(event_ret): if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data # are we done yet? if len(set(ret).intersection(minions)) >= len(minions): return ret # otherwise we hit the timeout, return what we have return ret def get_cache_returns(self, jid): ''' Execute a single pass to gather the contents of the job cache ''' ret = {} try: data = self.returners['{0}.get_jid'.format(self.opts['master_job_cache'])](jid) except Exception as exc: raise SaltClientError('Could not examine master job cache. ' 'Error occurred in {0} returner. 
' 'Exception details: {1}'.format(self.opts['master_job_cache'], exc)) for minion in data: m_data = {} if 'return' in data[minion]: m_data['ret'] = data[minion].get('return') else: m_data['ret'] = data[minion].get('return') if 'out' in data[minion]: m_data['out'] = data[minion]['out'] if minion in ret: ret[minion].update(m_data) else: ret[minion] = m_data return ret def get_cli_static_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, show_timeout=False, show_jid=False): ''' Get the returns for the command line interface via the event system ''' log.trace('entered - function get_cli_static_event_returns()') minions = set(minions) if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) if timeout is None: timeout = self.opts['timeout'] start = int(time.time()) timeout_at = start + timeout found = set() ret = {} # Check to see if the jid is real, if not return the empty dict try: if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') return ret except Exception as exc: raise SaltClientError('Load could not be retrieved from ' 'returner {0}. 
Exception details: {1}'.format( self.opts['master_job_cache'], exc)) # Wait for the hosts to check in while True: # Process events until timeout is reached or all minions have returned time_left = timeout_at - int(time.time()) # Wait 0 == forever, use a minimum of 1s wait = max(1, time_left) jid_tag = 'salt/job/{0}'.format(jid) raw = self.event.get_event(wait, jid_tag, auto_reconnect=self.auto_reconnect) if raw is not None and 'return' in raw: if 'minions' in raw.get('data', {}): minions.update(raw['data']['minions']) continue found.add(raw['id']) ret[raw['id']] = {'ret': raw['return']} ret[raw['id']]['success'] = raw.get('success', False) if 'out' in raw: ret[raw['id']]['out'] = raw['out'] if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break continue # Then event system timeout was reached and nothing was returned if len(found.intersection(minions)) >= len(minions): # All minions have returned, break out of the loop break if int(time.time()) > timeout_at: if verbose or show_timeout: if self.opts.get('minion_data_cache', False) \ or tgt_type in ('glob', 'pcre', 'list'): if len(found) < len(minions): fail = sorted(list(minions.difference(found))) for minion in fail: ret[minion] = { 'out': 'no_return', 'ret': 'Minion did not return' } break time.sleep(0.01) self._clean_up_subscriptions(jid) return ret def get_cli_event_returns( self, jid, minions, timeout=None, tgt='*', tgt_type='glob', verbose=False, progress=False, show_timeout=False, show_jid=False, **kwargs): ''' Get the returns for the command line interface via the event system ''' log.trace('func get_cli_event_returns()') if verbose: msg = 'Executing job with jid {0}'.format(jid) print(msg) print('-' * len(msg) + '\n') elif show_jid: print('jid: {0}'.format(jid)) # lazy load the connected minions connected_minions = None return_count = 0 for ret in self.get_iter_returns(jid, minions, timeout=timeout, tgt=tgt, tgt_type=tgt_type, # (gtmanfred) 
expect_minions is popped here incase it is passed from a client # call. If this is not popped, then it would be passed twice to # get_iter_returns. expect_minions=(kwargs.pop('expect_minions', False) or verbose or show_timeout), **kwargs ): log.debug('return event: %s', ret) return_count = return_count + 1 if progress: for id_, min_ret in six.iteritems(ret): if not min_ret.get('failed') is True: yield {'minion_count': len(minions), 'return_count': return_count} # replace the return structure for missing minions for id_, min_ret in six.iteritems(ret): if min_ret.get('failed') is True: if connected_minions is None: connected_minions = salt.utils.minions.CkMinions(self.opts).connected_ids() if self.opts['minion_data_cache'] \ and salt.cache.factory(self.opts).contains('minions/{0}'.format(id_), 'data') \ and connected_minions \ and id_ not in connected_minions: yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [Not connected]', 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: # don't report syndics as unresponsive minions if not os.path.exists(os.path.join(self.opts['syndic_dir'], id_)): yield { id_: { 'out': 'no_return', 'ret': 'Minion did not return. [No response]' '\nThe minions may not have all finished running and any ' 'remaining minions will return upon completion. To look ' 'up the return data for this job later, run the following ' 'command:\n\n' 'salt-run jobs.lookup_jid {0}'.format(jid), 'retcode': salt.defaults.exitcodes.EX_GENERIC } } else: yield {id_: min_ret} self._clean_up_subscriptions(jid) def get_event_iter_returns(self, jid, minions, timeout=None): ''' Gather the return data from the event system, break hard when timeout is reached. 
''' log.trace('entered - function get_event_iter_returns()') if timeout is None: timeout = self.opts['timeout'] timeout_at = time.time() + timeout found = set() # Check to see if the jid is real, if not return the empty dict if self.returners['{0}.get_load'.format(self.opts['master_job_cache'])](jid) == {}: log.warning('jid does not exist') yield {} # stop the iteration, since the jid is invalid raise StopIteration() # Wait for the hosts to check in while True: raw = self.event.get_event(timeout, auto_reconnect=self.auto_reconnect) if raw is None or time.time() > timeout_at: # Timeout reached break if 'minions' in raw.get('data', {}): continue try: found.add(raw['id']) ret = {raw['id']: {'ret': raw['return']}} except KeyError: # Ignore other erroneous messages continue if 'out' in raw: ret[raw['id']]['out'] = raw['out'] yield ret time.sleep(0.02) def _prep_pub(self, tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs): ''' Set up the payload_kwargs to be sent down to the master ''' if tgt_type == 'nodegroup': if tgt not in self.opts['nodegroups']: conf_file = self.opts.get( 'conf_file', 'the master config file' ) raise SaltInvocationError( 'Node group {0} unavailable in {1}'.format( tgt, conf_file ) ) tgt = salt.utils.minions.nodegroup_comp(tgt, self.opts['nodegroups']) tgt_type = 'compound' # Convert a range expression to a list of nodes and change expression # form to list if tgt_type == 'range' and HAS_RANGE: tgt = self._convert_range_to_list(tgt) tgt_type = 'list' # If an external job cache is specified add it to the ret list if self.opts.get('ext_job_cache'): if ret: ret += ',{0}'.format(self.opts['ext_job_cache']) else: ret = self.opts['ext_job_cache'] # format the payload - make a function that does this in the payload # module # Generate the standard keyword args to feed to format_payload payload_kwargs = {'cmd': 'publish', 'tgt': tgt, 'fun': fun, 'arg': arg, 'key': self.key, 'tgt_type': tgt_type, 'ret': ret, 'jid': jid} # if kwargs are passed, pack them. 
if kwargs: payload_kwargs['kwargs'] = kwargs # If we have a salt user, add it to the payload if self.opts['syndic_master'] and 'user' in kwargs: payload_kwargs['user'] = kwargs['user'] elif self.salt_user: payload_kwargs['user'] = self.salt_user # If we're a syndication master, pass the timeout if self.opts['order_masters']: payload_kwargs['to'] = timeout return payload_kwargs def pub(self, tgt, fun, arg=(), tgt_type='glob', ret='', jid='', timeout=5, listen=False, **kwargs): ''' Take the required arguments and publish the given command. Arguments: tgt: The tgt is a regex or a glob used to match up the ids on the minions. Salt works by always publishing every command to all of the minions and then the minions determine if the command is for them based on the tgt value. fun: The function name to be called on the remote host(s), this must be a string in the format "<modulename>.<function name>" arg: The arg option needs to be a tuple of arguments to pass to the calling function, if left blank Returns: jid: A string, as returned by the publisher, which is the job id, this will inform the client where to get the job results minions: A set, the targets that the tgt passed should match. ''' # Make sure the publisher is running by checking the unix socket if (self.opts.get('ipc_mode', '') != 'tcp' and not os.path.exists(os.path.join(self.opts['sock_dir'], 'publish_pull.ipc'))): log.error( 'Unable to connect to the salt master publisher at %s', self.opts['sock_dir'] ) raise SaltClientError payload_kwargs = self._prep_pub( tgt, fun, arg, tgt_type, ret, jid, timeout, **kwargs) master_uri = 'tcp://' + salt.utils.zeromq.ip_bracket(self.opts['interface']) + \ ':' + six.text_type(self.opts['ret_port']) channel = salt.transport.client.ReqChannel.factory(self.opts, crypt='clear', master_uri=master_uri) try: # Ensure that the event subscriber is connected. 
# If not, we won't get a response, so error out if listen and not self.event.connect_pub(timeout=timeout): raise SaltReqTimeoutError() payload = channel.send(payload_kwargs, timeout=timeout) except SaltReqTimeoutError as err: log.error(err) raise SaltReqTimeoutError( 'Salt request timed out. The master is not responding. You ' 'may need to run your command with `--async` in order to ' 'bypass the congested event bus. With `--async`, the CLI tool ' 'will print the job id (jid) and exit immediately without ' 'listening for responses. You can then use ' '`salt-run jobs.lookup_jid` to look up the results of the job ' 'in the job cache later.' ) if not payload: # The master key could have changed out from under us! Regen # and try again if the key has changed key = self.__read_master_key() if key == self.key: return payload self.key = key payload_kwargs['key'] = self.key payload = channel.send(payload_kwargs) error = payload.pop('error', None) if error is not None: if isinstance(error, dict): err_name = error.get('name', '') err_msg = error.get('message', '') if err_name == 'AuthenticationError': raise AuthenticationError(err_msg) elif err_name == 'AuthorizationError': raise AuthorizationError(err_msg) raise PublishError(error) if not payload: return payload # We have the payload, let's get rid of the channel fast(GC'ed faster) channel.close() return {'jid': payload['load']['jid'], 'minions': payload['load']['minions']} @tornado.gen.coroutine def __del__(self): # This IS really necessary! # When running tests, if self.events is not destroyed, we leak 2 # threads per test case which uses self.client if hasattr(self, 'event'): # The call below will take care of calling 'self.event.destroy()' del self.event def _clean_up_subscriptions(self, job_id): if self.opts.get('order_masters'): self.event.unsubscribe('syndic/.*/{0}'.format(job_id), 'regex') self.event.unsubscribe('salt/job/{0}'.format(job_id))
saltstack/salt
salt/client/__init__.py
FunctionWrapper.__load_functions
python
def __load_functions(self): ''' Find out what functions are available on the minion ''' return set(self.local.cmd(self.minion, 'sys.list_functions').get(self.minion, []))
Find out what functions are available on the minion
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L1953-L1958
[ "def cmd(self,\n tgt,\n fun,\n arg=(),\n timeout=None,\n tgt_type='glob',\n ret='',\n jid='',\n full_return=False,\n kwarg=None,\n **kwargs):\n '''\n Synchronously execute a command on targeted minions\n\n The cmd method will execute and wait for the timeout period for all\n minions to reply, then it will return all minion data at once.\n\n .. code-block:: python\n\n >>> import salt.client\n >>> local = salt.client.LocalClient()\n >>> local.cmd('*', 'cmd.run', ['whoami'])\n {'jerry': 'root'}\n\n With extra keyword arguments for the command function to be run:\n\n .. code-block:: python\n\n local.cmd('*', 'test.arg', ['arg1', 'arg2'], kwarg={'foo': 'bar'})\n\n Compound commands can be used for multiple executions in a single\n publish. Function names and function arguments are provided in separate\n lists but the index values must correlate and an empty list must be\n used if no arguments are required.\n\n .. code-block:: python\n\n >>> local.cmd('*', [\n 'grains.items',\n 'sys.doc',\n 'cmd.run',\n ],\n [\n [],\n [],\n ['uptime'],\n ])\n\n :param tgt: Which minions to target for the execution. Default is shell\n glob. Modified by the ``tgt_type`` option.\n :type tgt: string or list\n\n :param fun: The module and function to call on the specified minions of\n the form ``module.function``. For example ``test.ping`` or\n ``grains.items``.\n\n Compound commands\n Multiple functions may be called in a single publish by\n passing a list of commands. This can dramatically lower\n overhead and speed up the application communicating with Salt.\n\n This requires that the ``arg`` param is a list of lists. The\n ``fun`` list and the ``arg`` list must correlate by index\n meaning a function that does not take arguments must still have\n a corresponding empty list at the expected index.\n :type fun: string or list of strings\n\n :param arg: A list of arguments to pass to the remote function. 
If the\n function takes no arguments ``arg`` may be omitted except when\n executing a compound command.\n :type arg: list or list-of-lists\n\n :param timeout: Seconds to wait after the last minion returns but\n before all minions return.\n\n :param tgt_type: The type of ``tgt``. Allowed values:\n\n * ``glob`` - Bash glob completion - Default\n * ``pcre`` - Perl style regular expression\n * ``list`` - Python list of hosts\n * ``grain`` - Match based on a grain comparison\n * ``grain_pcre`` - Grain comparison with a regex\n * ``pillar`` - Pillar data comparison\n * ``pillar_pcre`` - Pillar data comparison with a regex\n * ``nodegroup`` - Match on nodegroup\n * ``range`` - Use a Range server for matching\n * ``compound`` - Pass a compound match string\n * ``ipcidr`` - Match based on Subnet (CIDR notation) or IPv4 address.\n\n .. versionchanged:: 2017.7.0\n Renamed from ``expr_form`` to ``tgt_type``\n\n :param ret: The returner to use. The value passed can be single\n returner, or a comma delimited list of returners to call in order\n on the minions\n\n :param kwarg: A dictionary with keyword arguments for the function.\n\n :param full_return: Output the job return only (default) or the full\n return including exit code and other job metadata.\n\n :param kwargs: Optional keyword arguments.\n Authentication credentials may be passed when using\n :conf_master:`external_auth`.\n\n For example: ``local.cmd('*', 'test.ping', username='saltdev',\n password='saltdev', eauth='pam')``.\n Or: ``local.cmd('*', 'test.ping',\n token='5871821ea51754fdcea8153c1c745433')``\n\n :returns: A dictionary with the result of the execution, keyed by\n minion ID. 
A compound command will return a sub-dictionary keyed by\n function name.\n '''\n was_listening = self.event.cpub\n\n try:\n pub_data = self.run_job(tgt,\n fun,\n arg,\n tgt_type,\n ret,\n timeout,\n jid,\n kwarg=kwarg,\n listen=True,\n **kwargs)\n\n if not pub_data:\n return pub_data\n\n ret = {}\n for fn_ret in self.get_cli_event_returns(\n pub_data['jid'],\n pub_data['minions'],\n self._get_timeout(timeout),\n tgt,\n tgt_type,\n **kwargs):\n\n if fn_ret:\n for mid, data in six.iteritems(fn_ret):\n ret[mid] = (data if full_return\n else data.get('ret', {}))\n\n for failed in list(set(pub_data['minions']) - set(ret)):\n ret[failed] = False\n return ret\n finally:\n if not was_listening:\n self.event.close_pub()\n" ]
class FunctionWrapper(dict): ''' Create a function wrapper that looks like the functions dict on the minion but invoked commands on the minion via a LocalClient. This allows SLS files to be loaded with an object that calls down to the minion when the salt functions dict is referenced. ''' def __init__(self, opts, minion): super(FunctionWrapper, self).__init__() self.opts = opts self.minion = minion self.local = LocalClient(self.opts['conf_file']) self.functions = self.__load_functions() def __missing__(self, key): ''' Since the function key is missing, wrap this call to a command to the minion of said key if it is available in the self.functions set ''' if key not in self.functions: raise KeyError return self.run_key(key) def run_key(self, key): ''' Return a function that executes the arguments passed via the local client ''' def func(*args, **kwargs): ''' Run a remote call ''' args = list(args) for _key, _val in kwargs: args.append('{0}={1}'.format(_key, _val)) return self.local.cmd(self.minion, key, args) return func
saltstack/salt
salt/client/__init__.py
FunctionWrapper.run_key
python
def run_key(self, key): ''' Return a function that executes the arguments passed via the local client ''' def func(*args, **kwargs): ''' Run a remote call ''' args = list(args) for _key, _val in kwargs: args.append('{0}={1}'.format(_key, _val)) return self.local.cmd(self.minion, key, args) return func
Return a function that executes the arguments passed via the local client
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L1960-L1973
null
class FunctionWrapper(dict): ''' Create a function wrapper that looks like the functions dict on the minion but invoked commands on the minion via a LocalClient. This allows SLS files to be loaded with an object that calls down to the minion when the salt functions dict is referenced. ''' def __init__(self, opts, minion): super(FunctionWrapper, self).__init__() self.opts = opts self.minion = minion self.local = LocalClient(self.opts['conf_file']) self.functions = self.__load_functions() def __missing__(self, key): ''' Since the function key is missing, wrap this call to a command to the minion of said key if it is available in the self.functions set ''' if key not in self.functions: raise KeyError return self.run_key(key) def __load_functions(self): ''' Find out what functions are available on the minion ''' return set(self.local.cmd(self.minion, 'sys.list_functions').get(self.minion, []))
saltstack/salt
salt/client/__init__.py
Caller.cmd
python
def cmd(self, fun, *args, **kwargs): ''' Call an execution module with the given arguments and keyword arguments .. versionchanged:: 2015.8.0 Added the ``cmd`` method for consistency with the other Salt clients. The existing ``function`` and ``sminion.functions`` interfaces still exist but have been removed from the docs. .. code-block:: python caller.cmd('test.arg', 'Foo', 'Bar', baz='Baz') caller.cmd('event.send', 'myco/myevent/something', data={'foo': 'Foo'}, with_env=['GIT_COMMIT'], with_grains=True) ''' return self.sminion.functions[fun](*args, **kwargs)
Call an execution module with the given arguments and keyword arguments .. versionchanged:: 2015.8.0 Added the ``cmd`` method for consistency with the other Salt clients. The existing ``function`` and ``sminion.functions`` interfaces still exist but have been removed from the docs. .. code-block:: python caller.cmd('test.arg', 'Foo', 'Bar', baz='Baz') caller.cmd('event.send', 'myco/myevent/something', data={'foo': 'Foo'}, with_env=['GIT_COMMIT'], with_grains=True)
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L2024-L2040
null
class Caller(object): ''' ``Caller`` is the same interface used by the :command:`salt-call` command-line tool on the Salt Minion. .. versionchanged:: 2015.8.0 Added the ``cmd`` method for consistency with the other Salt clients. The existing ``function`` and ``sminion.functions`` interfaces still exist but have been removed from the docs. Importing and using ``Caller`` must be done on the same machine as a Salt Minion and it must be done using the same user that the Salt Minion is running as. Usage: .. code-block:: python import salt.client caller = salt.client.Caller() caller.cmd('test.ping') Note, a running master or minion daemon is not required to use this class. Running ``salt-call --local`` simply sets :conf_minion:`file_client` to ``'local'``. The same can be achieved at the Python level by including that setting in a minion config file. .. versionadded:: 2014.7.0 Pass the minion config as the ``mopts`` dictionary. .. code-block:: python import salt.client import salt.config __opts__ = salt.config.minion_config('/etc/salt/minion') __opts__['file_client'] = 'local' caller = salt.client.Caller(mopts=__opts__) ''' def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, 'minion'), mopts=None): # Late-import of the minion module to keep the CLI as light as possible import salt.minion if mopts: self.opts = mopts else: self.opts = salt.config.minion_config(c_path) self.sminion = salt.minion.SMinion(self.opts) def function(self, fun, *args, **kwargs): ''' Call a single salt function ''' func = self.sminion.functions[fun] args, kwargs = salt.minion.load_args_and_kwargs( func, salt.utils.args.parse_input(args, kwargs=kwargs),) return func(*args, **kwargs)
saltstack/salt
salt/client/__init__.py
Caller.function
python
def function(self, fun, *args, **kwargs): ''' Call a single salt function ''' func = self.sminion.functions[fun] args, kwargs = salt.minion.load_args_and_kwargs( func, salt.utils.args.parse_input(args, kwargs=kwargs),) return func(*args, **kwargs)
Call a single salt function
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L2042-L2050
[ "def parse_input(args, kwargs=None, condition=True, no_parse=None):\n '''\n Parse out the args and kwargs from a list of input values. Optionally,\n return the args and kwargs without passing them to condition_input().\n\n Don't pull args with key=val apart if it has a newline in it.\n '''\n if no_parse is None:\n no_parse = ()\n if kwargs is None:\n kwargs = {}\n _args = []\n _kwargs = {}\n for arg in args:\n if isinstance(arg, six.string_types):\n arg_name, arg_value = parse_kwarg(arg)\n if arg_name:\n _kwargs[arg_name] = yamlify_arg(arg_value) \\\n if arg_name not in no_parse \\\n else arg_value\n else:\n _args.append(yamlify_arg(arg))\n elif isinstance(arg, dict):\n # Yes, we're popping this key off and adding it back if\n # condition_input is called below, but this is the only way to\n # gracefully handle both CLI and API input.\n if arg.pop('__kwarg__', False) is True:\n _kwargs.update(arg)\n else:\n _args.append(arg)\n else:\n _args.append(arg)\n _kwargs.update(kwargs)\n if condition:\n return condition_input(_args, _kwargs)\n return _args, _kwargs\n", "def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):\n '''\n Detect the args and kwargs that need to be passed to a function call, and\n check them against what was passed.\n '''\n argspec = salt.utils.args.get_function_argspec(func)\n _args = []\n _kwargs = {}\n invalid_kwargs = []\n\n for arg in args:\n if isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:\n # if the arg is a dict with __kwarg__ == True, then its a kwarg\n for key, val in six.iteritems(arg):\n if argspec.keywords or key in argspec.args:\n # Function supports **kwargs or is a positional argument to\n # the function.\n _kwargs[key] = val\n else:\n # **kwargs not in argspec and parsed argument name not in\n # list of positional arguments. 
This keyword argument is\n # invalid.\n invalid_kwargs.append('{0}={1}'.format(key, val))\n continue\n\n else:\n string_kwarg = salt.utils.args.parse_input([arg], condition=False)[1] # pylint: disable=W0632\n if string_kwarg:\n if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args:\n # Function supports **kwargs or is a positional argument to\n # the function.\n _kwargs.update(string_kwarg)\n else:\n # **kwargs not in argspec and parsed argument name not in\n # list of positional arguments. This keyword argument is\n # invalid.\n for key, val in six.iteritems(string_kwarg):\n invalid_kwargs.append('{0}={1}'.format(key, val))\n else:\n _args.append(arg)\n\n if invalid_kwargs and not ignore_invalid:\n salt.utils.args.invalid_kwargs(invalid_kwargs)\n\n if argspec.keywords and isinstance(data, dict):\n # this function accepts **kwargs, pack in the publish data\n for key, val in six.iteritems(data):\n _kwargs['__pub_{0}'.format(key)] = val\n\n return _args, _kwargs\n" ]
class Caller(object): ''' ``Caller`` is the same interface used by the :command:`salt-call` command-line tool on the Salt Minion. .. versionchanged:: 2015.8.0 Added the ``cmd`` method for consistency with the other Salt clients. The existing ``function`` and ``sminion.functions`` interfaces still exist but have been removed from the docs. Importing and using ``Caller`` must be done on the same machine as a Salt Minion and it must be done using the same user that the Salt Minion is running as. Usage: .. code-block:: python import salt.client caller = salt.client.Caller() caller.cmd('test.ping') Note, a running master or minion daemon is not required to use this class. Running ``salt-call --local`` simply sets :conf_minion:`file_client` to ``'local'``. The same can be achieved at the Python level by including that setting in a minion config file. .. versionadded:: 2014.7.0 Pass the minion config as the ``mopts`` dictionary. .. code-block:: python import salt.client import salt.config __opts__ = salt.config.minion_config('/etc/salt/minion') __opts__['file_client'] = 'local' caller = salt.client.Caller(mopts=__opts__) ''' def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, 'minion'), mopts=None): # Late-import of the minion module to keep the CLI as light as possible import salt.minion if mopts: self.opts = mopts else: self.opts = salt.config.minion_config(c_path) self.sminion = salt.minion.SMinion(self.opts) def cmd(self, fun, *args, **kwargs): ''' Call an execution module with the given arguments and keyword arguments .. versionchanged:: 2015.8.0 Added the ``cmd`` method for consistency with the other Salt clients. The existing ``function`` and ``sminion.functions`` interfaces still exist but have been removed from the docs. .. 
code-block:: python caller.cmd('test.arg', 'Foo', 'Bar', baz='Baz') caller.cmd('event.send', 'myco/myevent/something', data={'foo': 'Foo'}, with_env=['GIT_COMMIT'], with_grains=True) ''' return self.sminion.functions[fun](*args, **kwargs)
saltstack/salt
salt/client/__init__.py
ProxyCaller.cmd
python
def cmd(self, fun, *args, **kwargs): ''' Call an execution module with the given arguments and keyword arguments .. code-block:: python caller.cmd('test.arg', 'Foo', 'Bar', baz='Baz') caller.cmd('event.send', 'myco/myevent/something', data={'foo': 'Foo'}, with_env=['GIT_COMMIT'], with_grains=True) ''' func = self.sminion.functions[fun] data = { 'arg': args, 'fun': fun } data.update(kwargs) executors = getattr(self.sminion, 'module_executors', []) or \ self.opts.get('module_executors', ['direct_call']) if isinstance(executors, six.string_types): executors = [executors] for name in executors: fname = '{0}.execute'.format(name) if fname not in self.sminion.executors: raise SaltInvocationError("Executor '{0}' is not available".format(name)) return_data = self.sminion.executors[fname](self.opts, data, func, args, kwargs) if return_data is not None: break return return_data
Call an execution module with the given arguments and keyword arguments .. code-block:: python caller.cmd('test.arg', 'Foo', 'Bar', baz='Baz') caller.cmd('event.send', 'myco/myevent/something', data={'foo': 'Foo'}, with_env=['GIT_COMMIT'], with_grains=True)
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/__init__.py#L2097-L2125
null
class ProxyCaller(object): ''' ``ProxyCaller`` is the same interface used by the :command:`salt-call` with the args ``--proxyid <proxyid>`` command-line tool on the Salt Proxy Minion. Importing and using ``ProxyCaller`` must be done on the same machine as a Salt Minion and it must be done using the same user that the Salt Minion is running as. Usage: .. code-block:: python import salt.client caller = salt.client.Caller() caller.cmd('test.ping') Note, a running master or minion daemon is not required to use this class. Running ``salt-call --local`` simply sets :conf_minion:`file_client` to ``'local'``. The same can be achieved at the Python level by including that setting in a minion config file. .. code-block:: python import salt.client import salt.config __opts__ = salt.config.proxy_config('/etc/salt/proxy', minion_id='quirky_edison') __opts__['file_client'] = 'local' caller = salt.client.ProxyCaller(mopts=__opts__) .. note:: To use this for calling proxies, the :py:func:`is_proxy functions <salt.utils.platform.is_proxy>` requires that ``--proxyid`` be an argument on the commandline for the script this is used in, or that the string ``proxy`` is in the name of the script. ''' def __init__(self, c_path=os.path.join(syspaths.CONFIG_DIR, 'proxy'), mopts=None): # Late-import of the minion module to keep the CLI as light as possible import salt.minion self.opts = mopts or salt.config.proxy_config(c_path) self.sminion = salt.minion.SProxyMinion(self.opts)
saltstack/salt
salt/modules/win_smtp_server.py
_get_wmi_setting
python
def _get_wmi_setting(wmi_class_name, setting, server): ''' Get the value of the setting for the provided class. ''' with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) wmi_class = getattr(connection, wmi_class_name) objs = wmi_class([setting], Name=server)[0] ret = getattr(objs, setting) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting %s: %s', wmi_class_name, error) return ret
Get the value of the setting for the provided class.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_smtp_server.py#L58-L73
null
# -*- coding: utf-8 -*- ''' Module for managing IIS SMTP server configuration on Windows servers. The Windows features 'SMTP-Server' and 'Web-WMI' must be installed. :depends: wmi ''' # IIS metabase configuration settings: # https://goo.gl/XCt1uO # IIS logging options: # https://goo.gl/RL8ki9 # https://goo.gl/iwnDow # MicrosoftIISv2 namespace in Windows 2008r2 and later: # http://goo.gl/O4m48T # Connection and relay IPs in PowerShell: # https://goo.gl/aBMZ9K # http://goo.gl/MrybFq # Import python libs from __future__ import absolute_import, unicode_literals, print_function import logging import re # Import Salt libs from salt.exceptions import SaltInvocationError import salt.utils.args import salt.utils.platform # Import 3rd-party libs from salt.ext import six try: import wmi import salt.utils.winapi _HAS_MODULE_DEPENDENCIES = True except ImportError: _HAS_MODULE_DEPENDENCIES = False _DEFAULT_SERVER = 'SmtpSvc/1' _WMI_NAMESPACE = 'MicrosoftIISv2' _LOG = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'win_smtp_server' def __virtual__(): ''' Only works on Windows systems. ''' if salt.utils.platform.is_windows() and _HAS_MODULE_DEPENDENCIES: return __virtualname__ return False def _set_wmi_setting(wmi_class_name, setting, value, server): ''' Set the value of the setting for the provided class. 
''' with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) wmi_class = getattr(connection, wmi_class_name) objs = wmi_class(Name=server)[0] except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting %s: %s', wmi_class_name, error) try: setattr(objs, setting, value) return True except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except AttributeError as error: _LOG.error('Error setting %s: %s', setting, error) return False def _normalize_server_settings(**settings): ''' Convert setting values that had been improperly converted to a dict back to a string. ''' ret = dict() settings = salt.utils.args.clean_kwargs(**settings) for setting in settings: if isinstance(settings[setting], dict): _LOG.debug('Fixing value: %s', settings[setting]) value_from_key = next(six.iterkeys(settings[setting])) ret[setting] = "{{{0}}}".format(value_from_key) else: ret[setting] = settings[setting] return ret def get_log_format_types(): ''' Get all available log format names and ids. :return: A dictionary of the log format names and ids. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_log_format_types ''' ret = dict() prefix = 'logging/' with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) objs = connection.IISLogModuleSetting() # Remove the prefix from the name. for obj in objs: name = six.text_type(obj.Name).replace(prefix, '', 1) ret[name] = six.text_type(obj.LogModuleId) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting IISLogModuleSetting: %s', error) if not ret: _LOG.error('Unable to get log format types.') return ret def get_servers(): ''' Get the SMTP virtual server names. :return: A list of the SMTP virtual servers. :rtype: list CLI Example: .. 
code-block:: bash salt '*' win_smtp_server.get_servers ''' ret = list() with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) objs = connection.IIsSmtpServerSetting() for obj in objs: ret.append(six.text_type(obj.Name)) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting IIsSmtpServerSetting: %s', error) _LOG.debug('Found SMTP servers: %s', ret) return ret def get_server_setting(settings, server=_DEFAULT_SERVER): ''' Get the value of the setting for the SMTP virtual server. :param str settings: A list of the setting names. :param str server: The SMTP server name. :return: A dictionary of the provided settings and their values. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_server_setting settings="['MaxRecipients']" ''' ret = dict() if not settings: _LOG.warning('No settings provided.') return ret with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) objs = connection.IIsSmtpServerSetting(settings, Name=server)[0] for setting in settings: ret[setting] = six.text_type(getattr(objs, setting)) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting IIsSmtpServerSetting: %s', error) return ret def set_server_setting(settings, server=_DEFAULT_SERVER): ''' Set the value of the setting for the SMTP virtual server. .. note:: The setting names are case-sensitive. :param str settings: A dictionary of the setting names and their values. :param str server: The SMTP server name. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_smtp_server.set_server_setting settings="{'MaxRecipients': '500'}" ''' if not settings: _LOG.warning('No settings provided') return False # Some fields are formatted like '{data}'. 
Salt tries to convert these to dicts # automatically on input, so convert them back to the proper format. settings = _normalize_server_settings(**settings) current_settings = get_server_setting(settings=settings.keys(), server=server) if settings == current_settings: _LOG.debug('Settings already contain the provided values.') return True # Note that we must fetch all properties of IIsSmtpServerSetting below, since # filtering for specific properties and then attempting to set them will cause # an error like: wmi.x_wmi Unexpected COM Error -2147352567 with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) objs = connection.IIsSmtpServerSetting(Name=server)[0] except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting IIsSmtpServerSetting: %s', error) for setting in settings: if six.text_type(settings[setting]) != six.text_type(current_settings[setting]): try: setattr(objs, setting, settings[setting]) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except AttributeError as error: _LOG.error('Error setting %s: %s', setting, error) # Get the settings post-change so that we can verify tht all properties # were modified successfully. Track the ones that weren't. new_settings = get_server_setting(settings=settings.keys(), server=server) failed_settings = dict() for setting in settings: if six.text_type(settings[setting]) != six.text_type(new_settings[setting]): failed_settings[setting] = settings[setting] if failed_settings: _LOG.error('Failed to change settings: %s', failed_settings) return False _LOG.debug('Settings configured successfully: %s', settings.keys()) return True def get_log_format(server=_DEFAULT_SERVER): ''' Get the active log format for the SMTP virtual server. :param str server: The SMTP server name. :return: A string of the log format name. :rtype: str CLI Example: .. 
code-block:: bash salt '*' win_smtp_server.get_log_format ''' log_format_types = get_log_format_types() format_id = _get_wmi_setting('IIsSmtpServerSetting', 'LogPluginClsid', server) # Since IIsSmtpServerSetting stores the log type as an id, we need # to get the mapping from IISLogModuleSetting and extract the name. for key in log_format_types: if six.text_type(format_id) == log_format_types[key]: return key _LOG.warning('Unable to determine log format.') return None def set_log_format(log_format, server=_DEFAULT_SERVER): ''' Set the active log format for the SMTP virtual server. :param str log_format: The log format name. :param str server: The SMTP server name. :return: A boolean representing whether the change succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_smtp_server.set_log_format 'Microsoft IIS Log File Format' ''' setting = 'LogPluginClsid' log_format_types = get_log_format_types() format_id = log_format_types.get(log_format, None) if not format_id: message = ("Invalid log format '{0}' specified. Valid formats:" ' {1}').format(log_format, log_format_types.keys()) raise SaltInvocationError(message) _LOG.debug("Id for '%s' found: %s", log_format, format_id) current_log_format = get_log_format(server) if log_format == current_log_format: _LOG.debug('%s already contains the provided format.', setting) return True _set_wmi_setting('IIsSmtpServerSetting', setting, format_id, server) new_log_format = get_log_format(server) ret = log_format == new_log_format if ret: _LOG.debug("Setting %s configured successfully: %s", setting, log_format) else: _LOG.error("Unable to configure %s with value: %s", setting, log_format) return ret def get_connection_ip_list(as_wmi_format=False, server=_DEFAULT_SERVER): ''' Get the IPGrant list for the SMTP virtual server. :param bool as_wmi_format: Returns the connection IPs as a list in the format WMI expects. :param str server: The SMTP server name. :return: A dictionary of the IP and subnet pairs. 
:rtype: dict CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_connection_ip_list ''' ret = dict() setting = 'IPGrant' reg_separator = r',\s*' if as_wmi_format: ret = list() addresses = _get_wmi_setting('IIsIPSecuritySetting', setting, server) # WMI returns the addresses as a tuple of unicode strings, each representing # an address/subnet pair. Remove extra spaces that may be present. for unnormalized_address in addresses: ip_address, subnet = re.split(reg_separator, unnormalized_address) if as_wmi_format: ret.append('{0}, {1}'.format(ip_address, subnet)) else: ret[ip_address] = subnet if not ret: _LOG.debug('%s is empty.', setting) return ret def set_connection_ip_list(addresses=None, grant_by_default=False, server=_DEFAULT_SERVER): ''' Set the IPGrant list for the SMTP virtual server. :param str addresses: A dictionary of IP + subnet pairs. :param bool grant_by_default: Whether the addresses should be a blacklist or whitelist. :param str server: The SMTP server name. :return: A boolean representing whether the change succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_smtp_server.set_connection_ip_list addresses="{'127.0.0.1': '255.255.255.255'}" ''' setting = 'IPGrant' formatted_addresses = list() # It's okay to accept an empty list for set_connection_ip_list, # since an empty list may be desirable. if not addresses: addresses = dict() _LOG.debug('Empty %s specified.', setting) # Convert addresses to the 'ip_address, subnet' format used by # IIsIPSecuritySetting. for address in addresses: formatted_addresses.append('{0}, {1}'.format(address.strip(), addresses[address].strip())) current_addresses = get_connection_ip_list(as_wmi_format=True, server=server) # Order is not important, so compare to the current addresses as unordered sets. 
if set(formatted_addresses) == set(current_addresses): _LOG.debug('%s already contains the provided addresses.', setting) return True # First we should check GrantByDefault, and change it if necessary. current_grant_by_default = _get_wmi_setting('IIsIPSecuritySetting', 'GrantByDefault', server) if grant_by_default != current_grant_by_default: _LOG.debug('Setting GrantByDefault to: %s', grant_by_default) _set_wmi_setting('IIsIPSecuritySetting', 'GrantByDefault', grant_by_default, server) _set_wmi_setting('IIsIPSecuritySetting', setting, formatted_addresses, server) new_addresses = get_connection_ip_list(as_wmi_format=True, server=server) ret = set(formatted_addresses) == set(new_addresses) if ret: _LOG.debug('%s configured successfully: %s', setting, formatted_addresses) return ret _LOG.error('Unable to configure %s with value: %s', setting, formatted_addresses) return ret def get_relay_ip_list(server=_DEFAULT_SERVER): ''' Get the RelayIpList list for the SMTP virtual server. :param str server: The SMTP server name. :return: A list of the relay IPs. :rtype: list .. note:: A return value of None corresponds to the restrictive 'Only the list below' GUI parameter with an empty access list, and setting an empty list/tuple corresponds to the more permissive 'All except the list below' GUI parameter. CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_relay_ip_list ''' ret = list() setting = 'RelayIpList' lines = _get_wmi_setting('IIsSmtpServerSetting', setting, server) if not lines: _LOG.debug('%s is empty: %s', setting, lines) if lines is None: lines = [None] return list(lines) # WMI returns the addresses as a tuple of individual octets, so we # need to group them and reassemble them into IP addresses. 
i = 0 while i < len(lines): octets = [six.text_type(x) for x in lines[i: i + 4]] address = '.'.join(octets) ret.append(address) i += 4 return ret def set_relay_ip_list(addresses=None, server=_DEFAULT_SERVER): ''' Set the RelayIpList list for the SMTP virtual server. Due to the unusual way that Windows stores the relay IPs, it is advisable to retrieve the existing list you wish to set from a pre-configured server. For example, setting '127.0.0.1' as an allowed relay IP through the GUI would generate an actual relay IP list similar to the following: .. code-block:: cfg ['24.0.0.128', '32.0.0.128', '60.0.0.128', '68.0.0.128', '1.0.0.0', '76.0.0.0', '0.0.0.0', '0.0.0.0', '1.0.0.0', '1.0.0.0', '2.0.0.0', '2.0.0.0', '4.0.0.0', '0.0.0.0', '76.0.0.128', '0.0.0.0', '0.0.0.0', '0.0.0.0', '0.0.0.0', '255.255.255.255', '127.0.0.1'] .. note:: Setting the list to None corresponds to the restrictive 'Only the list below' GUI parameter with an empty access list configured, and setting an empty list/tuple corresponds to the more permissive 'All except the list below' GUI parameter. :param str addresses: A list of the relay IPs. The order of the list is important. :param str server: The SMTP server name. :return: A boolean representing whether the change succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_smtp_server.set_relay_ip_list addresses="['192.168.1.1', '172.16.1.1']" ''' setting = 'RelayIpList' formatted_addresses = list() current_addresses = get_relay_ip_list(server) if list(addresses) == current_addresses: _LOG.debug('%s already contains the provided addresses.', setting) return True if addresses: # The WMI input data needs to be in the format used by RelayIpList. Order # is also important due to the way RelayIpList orders the address list. 
if addresses[0] is None: formatted_addresses = None else: for address in addresses: for octet in address.split('.'): formatted_addresses.append(octet) _LOG.debug('Formatted %s addresses: %s', setting, formatted_addresses) _set_wmi_setting('IIsSmtpServerSetting', setting, formatted_addresses, server) new_addresses = get_relay_ip_list(server) ret = list(addresses) == new_addresses if ret: _LOG.debug('%s configured successfully: %s', setting, addresses) return ret _LOG.error('Unable to configure %s with value: %s', setting, addresses) return ret
saltstack/salt
salt/modules/win_smtp_server.py
_set_wmi_setting
python
def _set_wmi_setting(wmi_class_name, setting, value, server): ''' Set the value of the setting for the provided class. ''' with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) wmi_class = getattr(connection, wmi_class_name) objs = wmi_class(Name=server)[0] except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting %s: %s', wmi_class_name, error) try: setattr(objs, setting, value) return True except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except AttributeError as error: _LOG.error('Error setting %s: %s', setting, error) return False
Set the value of the setting for the provided class.
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_smtp_server.py#L76-L98
null
# -*- coding: utf-8 -*- ''' Module for managing IIS SMTP server configuration on Windows servers. The Windows features 'SMTP-Server' and 'Web-WMI' must be installed. :depends: wmi ''' # IIS metabase configuration settings: # https://goo.gl/XCt1uO # IIS logging options: # https://goo.gl/RL8ki9 # https://goo.gl/iwnDow # MicrosoftIISv2 namespace in Windows 2008r2 and later: # http://goo.gl/O4m48T # Connection and relay IPs in PowerShell: # https://goo.gl/aBMZ9K # http://goo.gl/MrybFq # Import python libs from __future__ import absolute_import, unicode_literals, print_function import logging import re # Import Salt libs from salt.exceptions import SaltInvocationError import salt.utils.args import salt.utils.platform # Import 3rd-party libs from salt.ext import six try: import wmi import salt.utils.winapi _HAS_MODULE_DEPENDENCIES = True except ImportError: _HAS_MODULE_DEPENDENCIES = False _DEFAULT_SERVER = 'SmtpSvc/1' _WMI_NAMESPACE = 'MicrosoftIISv2' _LOG = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'win_smtp_server' def __virtual__(): ''' Only works on Windows systems. ''' if salt.utils.platform.is_windows() and _HAS_MODULE_DEPENDENCIES: return __virtualname__ return False def _get_wmi_setting(wmi_class_name, setting, server): ''' Get the value of the setting for the provided class. ''' with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) wmi_class = getattr(connection, wmi_class_name) objs = wmi_class([setting], Name=server)[0] ret = getattr(objs, setting) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting %s: %s', wmi_class_name, error) return ret def _normalize_server_settings(**settings): ''' Convert setting values that had been improperly converted to a dict back to a string. 
''' ret = dict() settings = salt.utils.args.clean_kwargs(**settings) for setting in settings: if isinstance(settings[setting], dict): _LOG.debug('Fixing value: %s', settings[setting]) value_from_key = next(six.iterkeys(settings[setting])) ret[setting] = "{{{0}}}".format(value_from_key) else: ret[setting] = settings[setting] return ret def get_log_format_types(): ''' Get all available log format names and ids. :return: A dictionary of the log format names and ids. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_log_format_types ''' ret = dict() prefix = 'logging/' with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) objs = connection.IISLogModuleSetting() # Remove the prefix from the name. for obj in objs: name = six.text_type(obj.Name).replace(prefix, '', 1) ret[name] = six.text_type(obj.LogModuleId) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting IISLogModuleSetting: %s', error) if not ret: _LOG.error('Unable to get log format types.') return ret def get_servers(): ''' Get the SMTP virtual server names. :return: A list of the SMTP virtual servers. :rtype: list CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_servers ''' ret = list() with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) objs = connection.IIsSmtpServerSetting() for obj in objs: ret.append(six.text_type(obj.Name)) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting IIsSmtpServerSetting: %s', error) _LOG.debug('Found SMTP servers: %s', ret) return ret def get_server_setting(settings, server=_DEFAULT_SERVER): ''' Get the value of the setting for the SMTP virtual server. :param str settings: A list of the setting names. :param str server: The SMTP server name. 
:return: A dictionary of the provided settings and their values. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_server_setting settings="['MaxRecipients']" ''' ret = dict() if not settings: _LOG.warning('No settings provided.') return ret with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) objs = connection.IIsSmtpServerSetting(settings, Name=server)[0] for setting in settings: ret[setting] = six.text_type(getattr(objs, setting)) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting IIsSmtpServerSetting: %s', error) return ret def set_server_setting(settings, server=_DEFAULT_SERVER): ''' Set the value of the setting for the SMTP virtual server. .. note:: The setting names are case-sensitive. :param str settings: A dictionary of the setting names and their values. :param str server: The SMTP server name. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_smtp_server.set_server_setting settings="{'MaxRecipients': '500'}" ''' if not settings: _LOG.warning('No settings provided') return False # Some fields are formatted like '{data}'. Salt tries to convert these to dicts # automatically on input, so convert them back to the proper format. 
settings = _normalize_server_settings(**settings) current_settings = get_server_setting(settings=settings.keys(), server=server) if settings == current_settings: _LOG.debug('Settings already contain the provided values.') return True # Note that we must fetch all properties of IIsSmtpServerSetting below, since # filtering for specific properties and then attempting to set them will cause # an error like: wmi.x_wmi Unexpected COM Error -2147352567 with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) objs = connection.IIsSmtpServerSetting(Name=server)[0] except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting IIsSmtpServerSetting: %s', error) for setting in settings: if six.text_type(settings[setting]) != six.text_type(current_settings[setting]): try: setattr(objs, setting, settings[setting]) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except AttributeError as error: _LOG.error('Error setting %s: %s', setting, error) # Get the settings post-change so that we can verify tht all properties # were modified successfully. Track the ones that weren't. new_settings = get_server_setting(settings=settings.keys(), server=server) failed_settings = dict() for setting in settings: if six.text_type(settings[setting]) != six.text_type(new_settings[setting]): failed_settings[setting] = settings[setting] if failed_settings: _LOG.error('Failed to change settings: %s', failed_settings) return False _LOG.debug('Settings configured successfully: %s', settings.keys()) return True def get_log_format(server=_DEFAULT_SERVER): ''' Get the active log format for the SMTP virtual server. :param str server: The SMTP server name. :return: A string of the log format name. :rtype: str CLI Example: .. 
code-block:: bash salt '*' win_smtp_server.get_log_format ''' log_format_types = get_log_format_types() format_id = _get_wmi_setting('IIsSmtpServerSetting', 'LogPluginClsid', server) # Since IIsSmtpServerSetting stores the log type as an id, we need # to get the mapping from IISLogModuleSetting and extract the name. for key in log_format_types: if six.text_type(format_id) == log_format_types[key]: return key _LOG.warning('Unable to determine log format.') return None def set_log_format(log_format, server=_DEFAULT_SERVER): ''' Set the active log format for the SMTP virtual server. :param str log_format: The log format name. :param str server: The SMTP server name. :return: A boolean representing whether the change succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_smtp_server.set_log_format 'Microsoft IIS Log File Format' ''' setting = 'LogPluginClsid' log_format_types = get_log_format_types() format_id = log_format_types.get(log_format, None) if not format_id: message = ("Invalid log format '{0}' specified. Valid formats:" ' {1}').format(log_format, log_format_types.keys()) raise SaltInvocationError(message) _LOG.debug("Id for '%s' found: %s", log_format, format_id) current_log_format = get_log_format(server) if log_format == current_log_format: _LOG.debug('%s already contains the provided format.', setting) return True _set_wmi_setting('IIsSmtpServerSetting', setting, format_id, server) new_log_format = get_log_format(server) ret = log_format == new_log_format if ret: _LOG.debug("Setting %s configured successfully: %s", setting, log_format) else: _LOG.error("Unable to configure %s with value: %s", setting, log_format) return ret def get_connection_ip_list(as_wmi_format=False, server=_DEFAULT_SERVER): ''' Get the IPGrant list for the SMTP virtual server. :param bool as_wmi_format: Returns the connection IPs as a list in the format WMI expects. :param str server: The SMTP server name. :return: A dictionary of the IP and subnet pairs. 
:rtype: dict CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_connection_ip_list ''' ret = dict() setting = 'IPGrant' reg_separator = r',\s*' if as_wmi_format: ret = list() addresses = _get_wmi_setting('IIsIPSecuritySetting', setting, server) # WMI returns the addresses as a tuple of unicode strings, each representing # an address/subnet pair. Remove extra spaces that may be present. for unnormalized_address in addresses: ip_address, subnet = re.split(reg_separator, unnormalized_address) if as_wmi_format: ret.append('{0}, {1}'.format(ip_address, subnet)) else: ret[ip_address] = subnet if not ret: _LOG.debug('%s is empty.', setting) return ret def set_connection_ip_list(addresses=None, grant_by_default=False, server=_DEFAULT_SERVER): ''' Set the IPGrant list for the SMTP virtual server. :param str addresses: A dictionary of IP + subnet pairs. :param bool grant_by_default: Whether the addresses should be a blacklist or whitelist. :param str server: The SMTP server name. :return: A boolean representing whether the change succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_smtp_server.set_connection_ip_list addresses="{'127.0.0.1': '255.255.255.255'}" ''' setting = 'IPGrant' formatted_addresses = list() # It's okay to accept an empty list for set_connection_ip_list, # since an empty list may be desirable. if not addresses: addresses = dict() _LOG.debug('Empty %s specified.', setting) # Convert addresses to the 'ip_address, subnet' format used by # IIsIPSecuritySetting. for address in addresses: formatted_addresses.append('{0}, {1}'.format(address.strip(), addresses[address].strip())) current_addresses = get_connection_ip_list(as_wmi_format=True, server=server) # Order is not important, so compare to the current addresses as unordered sets. 
if set(formatted_addresses) == set(current_addresses): _LOG.debug('%s already contains the provided addresses.', setting) return True # First we should check GrantByDefault, and change it if necessary. current_grant_by_default = _get_wmi_setting('IIsIPSecuritySetting', 'GrantByDefault', server) if grant_by_default != current_grant_by_default: _LOG.debug('Setting GrantByDefault to: %s', grant_by_default) _set_wmi_setting('IIsIPSecuritySetting', 'GrantByDefault', grant_by_default, server) _set_wmi_setting('IIsIPSecuritySetting', setting, formatted_addresses, server) new_addresses = get_connection_ip_list(as_wmi_format=True, server=server) ret = set(formatted_addresses) == set(new_addresses) if ret: _LOG.debug('%s configured successfully: %s', setting, formatted_addresses) return ret _LOG.error('Unable to configure %s with value: %s', setting, formatted_addresses) return ret def get_relay_ip_list(server=_DEFAULT_SERVER): ''' Get the RelayIpList list for the SMTP virtual server. :param str server: The SMTP server name. :return: A list of the relay IPs. :rtype: list .. note:: A return value of None corresponds to the restrictive 'Only the list below' GUI parameter with an empty access list, and setting an empty list/tuple corresponds to the more permissive 'All except the list below' GUI parameter. CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_relay_ip_list ''' ret = list() setting = 'RelayIpList' lines = _get_wmi_setting('IIsSmtpServerSetting', setting, server) if not lines: _LOG.debug('%s is empty: %s', setting, lines) if lines is None: lines = [None] return list(lines) # WMI returns the addresses as a tuple of individual octets, so we # need to group them and reassemble them into IP addresses. 
i = 0 while i < len(lines): octets = [six.text_type(x) for x in lines[i: i + 4]] address = '.'.join(octets) ret.append(address) i += 4 return ret def set_relay_ip_list(addresses=None, server=_DEFAULT_SERVER): ''' Set the RelayIpList list for the SMTP virtual server. Due to the unusual way that Windows stores the relay IPs, it is advisable to retrieve the existing list you wish to set from a pre-configured server. For example, setting '127.0.0.1' as an allowed relay IP through the GUI would generate an actual relay IP list similar to the following: .. code-block:: cfg ['24.0.0.128', '32.0.0.128', '60.0.0.128', '68.0.0.128', '1.0.0.0', '76.0.0.0', '0.0.0.0', '0.0.0.0', '1.0.0.0', '1.0.0.0', '2.0.0.0', '2.0.0.0', '4.0.0.0', '0.0.0.0', '76.0.0.128', '0.0.0.0', '0.0.0.0', '0.0.0.0', '0.0.0.0', '255.255.255.255', '127.0.0.1'] .. note:: Setting the list to None corresponds to the restrictive 'Only the list below' GUI parameter with an empty access list configured, and setting an empty list/tuple corresponds to the more permissive 'All except the list below' GUI parameter. :param str addresses: A list of the relay IPs. The order of the list is important. :param str server: The SMTP server name. :return: A boolean representing whether the change succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_smtp_server.set_relay_ip_list addresses="['192.168.1.1', '172.16.1.1']" ''' setting = 'RelayIpList' formatted_addresses = list() current_addresses = get_relay_ip_list(server) if list(addresses) == current_addresses: _LOG.debug('%s already contains the provided addresses.', setting) return True if addresses: # The WMI input data needs to be in the format used by RelayIpList. Order # is also important due to the way RelayIpList orders the address list. 
if addresses[0] is None: formatted_addresses = None else: for address in addresses: for octet in address.split('.'): formatted_addresses.append(octet) _LOG.debug('Formatted %s addresses: %s', setting, formatted_addresses) _set_wmi_setting('IIsSmtpServerSetting', setting, formatted_addresses, server) new_addresses = get_relay_ip_list(server) ret = list(addresses) == new_addresses if ret: _LOG.debug('%s configured successfully: %s', setting, addresses) return ret _LOG.error('Unable to configure %s with value: %s', setting, addresses) return ret
saltstack/salt
salt/modules/win_smtp_server.py
get_log_format_types
python
def get_log_format_types():
    '''
    Get all available log format names and ids.

    :return: A dictionary of the log format names and ids.
    :rtype: dict

    CLI Example:

    .. code-block:: bash

        salt '*' win_smtp_server.get_log_format_types
    '''
    formats = dict()
    name_prefix = 'logging/'

    with salt.utils.winapi.Com():
        try:
            wmi_connection = wmi.WMI(namespace=_WMI_NAMESPACE)
            log_modules = wmi_connection.IISLogModuleSetting()

            # Strip the 'logging/' prefix so callers see the bare format name,
            # mapped to its module id.
            for log_module in log_modules:
                short_name = six.text_type(log_module.Name).replace(name_prefix, '', 1)
                formats[short_name] = six.text_type(log_module.LogModuleId)
        except wmi.x_wmi as error:
            _LOG.error('Encountered WMI error: %s', error.com_error)
        except (AttributeError, IndexError) as error:
            _LOG.error('Error getting IISLogModuleSetting: %s', error)

    if not formats:
        _LOG.error('Unable to get log format types.')
    return formats
Get all available log format names and ids. :return: A dictionary of the log format names and ids. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_log_format_types
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_smtp_server.py#L119-L151
null
# -*- coding: utf-8 -*- ''' Module for managing IIS SMTP server configuration on Windows servers. The Windows features 'SMTP-Server' and 'Web-WMI' must be installed. :depends: wmi ''' # IIS metabase configuration settings: # https://goo.gl/XCt1uO # IIS logging options: # https://goo.gl/RL8ki9 # https://goo.gl/iwnDow # MicrosoftIISv2 namespace in Windows 2008r2 and later: # http://goo.gl/O4m48T # Connection and relay IPs in PowerShell: # https://goo.gl/aBMZ9K # http://goo.gl/MrybFq # Import python libs from __future__ import absolute_import, unicode_literals, print_function import logging import re # Import Salt libs from salt.exceptions import SaltInvocationError import salt.utils.args import salt.utils.platform # Import 3rd-party libs from salt.ext import six try: import wmi import salt.utils.winapi _HAS_MODULE_DEPENDENCIES = True except ImportError: _HAS_MODULE_DEPENDENCIES = False _DEFAULT_SERVER = 'SmtpSvc/1' _WMI_NAMESPACE = 'MicrosoftIISv2' _LOG = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'win_smtp_server' def __virtual__(): ''' Only works on Windows systems. ''' if salt.utils.platform.is_windows() and _HAS_MODULE_DEPENDENCIES: return __virtualname__ return False def _get_wmi_setting(wmi_class_name, setting, server): ''' Get the value of the setting for the provided class. ''' with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) wmi_class = getattr(connection, wmi_class_name) objs = wmi_class([setting], Name=server)[0] ret = getattr(objs, setting) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting %s: %s', wmi_class_name, error) return ret def _set_wmi_setting(wmi_class_name, setting, value, server): ''' Set the value of the setting for the provided class. 
''' with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) wmi_class = getattr(connection, wmi_class_name) objs = wmi_class(Name=server)[0] except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting %s: %s', wmi_class_name, error) try: setattr(objs, setting, value) return True except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except AttributeError as error: _LOG.error('Error setting %s: %s', setting, error) return False def _normalize_server_settings(**settings): ''' Convert setting values that had been improperly converted to a dict back to a string. ''' ret = dict() settings = salt.utils.args.clean_kwargs(**settings) for setting in settings: if isinstance(settings[setting], dict): _LOG.debug('Fixing value: %s', settings[setting]) value_from_key = next(six.iterkeys(settings[setting])) ret[setting] = "{{{0}}}".format(value_from_key) else: ret[setting] = settings[setting] return ret def get_servers(): ''' Get the SMTP virtual server names. :return: A list of the SMTP virtual servers. :rtype: list CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_servers ''' ret = list() with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) objs = connection.IIsSmtpServerSetting() for obj in objs: ret.append(six.text_type(obj.Name)) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting IIsSmtpServerSetting: %s', error) _LOG.debug('Found SMTP servers: %s', ret) return ret def get_server_setting(settings, server=_DEFAULT_SERVER): ''' Get the value of the setting for the SMTP virtual server. :param str settings: A list of the setting names. :param str server: The SMTP server name. :return: A dictionary of the provided settings and their values. :rtype: dict CLI Example: .. 
code-block:: bash salt '*' win_smtp_server.get_server_setting settings="['MaxRecipients']" ''' ret = dict() if not settings: _LOG.warning('No settings provided.') return ret with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) objs = connection.IIsSmtpServerSetting(settings, Name=server)[0] for setting in settings: ret[setting] = six.text_type(getattr(objs, setting)) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting IIsSmtpServerSetting: %s', error) return ret def set_server_setting(settings, server=_DEFAULT_SERVER): ''' Set the value of the setting for the SMTP virtual server. .. note:: The setting names are case-sensitive. :param str settings: A dictionary of the setting names and their values. :param str server: The SMTP server name. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_smtp_server.set_server_setting settings="{'MaxRecipients': '500'}" ''' if not settings: _LOG.warning('No settings provided') return False # Some fields are formatted like '{data}'. Salt tries to convert these to dicts # automatically on input, so convert them back to the proper format. 
settings = _normalize_server_settings(**settings) current_settings = get_server_setting(settings=settings.keys(), server=server) if settings == current_settings: _LOG.debug('Settings already contain the provided values.') return True # Note that we must fetch all properties of IIsSmtpServerSetting below, since # filtering for specific properties and then attempting to set them will cause # an error like: wmi.x_wmi Unexpected COM Error -2147352567 with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) objs = connection.IIsSmtpServerSetting(Name=server)[0] except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting IIsSmtpServerSetting: %s', error) for setting in settings: if six.text_type(settings[setting]) != six.text_type(current_settings[setting]): try: setattr(objs, setting, settings[setting]) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except AttributeError as error: _LOG.error('Error setting %s: %s', setting, error) # Get the settings post-change so that we can verify tht all properties # were modified successfully. Track the ones that weren't. new_settings = get_server_setting(settings=settings.keys(), server=server) failed_settings = dict() for setting in settings: if six.text_type(settings[setting]) != six.text_type(new_settings[setting]): failed_settings[setting] = settings[setting] if failed_settings: _LOG.error('Failed to change settings: %s', failed_settings) return False _LOG.debug('Settings configured successfully: %s', settings.keys()) return True def get_log_format(server=_DEFAULT_SERVER): ''' Get the active log format for the SMTP virtual server. :param str server: The SMTP server name. :return: A string of the log format name. :rtype: str CLI Example: .. 
code-block:: bash salt '*' win_smtp_server.get_log_format ''' log_format_types = get_log_format_types() format_id = _get_wmi_setting('IIsSmtpServerSetting', 'LogPluginClsid', server) # Since IIsSmtpServerSetting stores the log type as an id, we need # to get the mapping from IISLogModuleSetting and extract the name. for key in log_format_types: if six.text_type(format_id) == log_format_types[key]: return key _LOG.warning('Unable to determine log format.') return None def set_log_format(log_format, server=_DEFAULT_SERVER): ''' Set the active log format for the SMTP virtual server. :param str log_format: The log format name. :param str server: The SMTP server name. :return: A boolean representing whether the change succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_smtp_server.set_log_format 'Microsoft IIS Log File Format' ''' setting = 'LogPluginClsid' log_format_types = get_log_format_types() format_id = log_format_types.get(log_format, None) if not format_id: message = ("Invalid log format '{0}' specified. Valid formats:" ' {1}').format(log_format, log_format_types.keys()) raise SaltInvocationError(message) _LOG.debug("Id for '%s' found: %s", log_format, format_id) current_log_format = get_log_format(server) if log_format == current_log_format: _LOG.debug('%s already contains the provided format.', setting) return True _set_wmi_setting('IIsSmtpServerSetting', setting, format_id, server) new_log_format = get_log_format(server) ret = log_format == new_log_format if ret: _LOG.debug("Setting %s configured successfully: %s", setting, log_format) else: _LOG.error("Unable to configure %s with value: %s", setting, log_format) return ret def get_connection_ip_list(as_wmi_format=False, server=_DEFAULT_SERVER): ''' Get the IPGrant list for the SMTP virtual server. :param bool as_wmi_format: Returns the connection IPs as a list in the format WMI expects. :param str server: The SMTP server name. :return: A dictionary of the IP and subnet pairs. 
:rtype: dict CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_connection_ip_list ''' ret = dict() setting = 'IPGrant' reg_separator = r',\s*' if as_wmi_format: ret = list() addresses = _get_wmi_setting('IIsIPSecuritySetting', setting, server) # WMI returns the addresses as a tuple of unicode strings, each representing # an address/subnet pair. Remove extra spaces that may be present. for unnormalized_address in addresses: ip_address, subnet = re.split(reg_separator, unnormalized_address) if as_wmi_format: ret.append('{0}, {1}'.format(ip_address, subnet)) else: ret[ip_address] = subnet if not ret: _LOG.debug('%s is empty.', setting) return ret def set_connection_ip_list(addresses=None, grant_by_default=False, server=_DEFAULT_SERVER): ''' Set the IPGrant list for the SMTP virtual server. :param str addresses: A dictionary of IP + subnet pairs. :param bool grant_by_default: Whether the addresses should be a blacklist or whitelist. :param str server: The SMTP server name. :return: A boolean representing whether the change succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_smtp_server.set_connection_ip_list addresses="{'127.0.0.1': '255.255.255.255'}" ''' setting = 'IPGrant' formatted_addresses = list() # It's okay to accept an empty list for set_connection_ip_list, # since an empty list may be desirable. if not addresses: addresses = dict() _LOG.debug('Empty %s specified.', setting) # Convert addresses to the 'ip_address, subnet' format used by # IIsIPSecuritySetting. for address in addresses: formatted_addresses.append('{0}, {1}'.format(address.strip(), addresses[address].strip())) current_addresses = get_connection_ip_list(as_wmi_format=True, server=server) # Order is not important, so compare to the current addresses as unordered sets. 
if set(formatted_addresses) == set(current_addresses): _LOG.debug('%s already contains the provided addresses.', setting) return True # First we should check GrantByDefault, and change it if necessary. current_grant_by_default = _get_wmi_setting('IIsIPSecuritySetting', 'GrantByDefault', server) if grant_by_default != current_grant_by_default: _LOG.debug('Setting GrantByDefault to: %s', grant_by_default) _set_wmi_setting('IIsIPSecuritySetting', 'GrantByDefault', grant_by_default, server) _set_wmi_setting('IIsIPSecuritySetting', setting, formatted_addresses, server) new_addresses = get_connection_ip_list(as_wmi_format=True, server=server) ret = set(formatted_addresses) == set(new_addresses) if ret: _LOG.debug('%s configured successfully: %s', setting, formatted_addresses) return ret _LOG.error('Unable to configure %s with value: %s', setting, formatted_addresses) return ret def get_relay_ip_list(server=_DEFAULT_SERVER): ''' Get the RelayIpList list for the SMTP virtual server. :param str server: The SMTP server name. :return: A list of the relay IPs. :rtype: list .. note:: A return value of None corresponds to the restrictive 'Only the list below' GUI parameter with an empty access list, and setting an empty list/tuple corresponds to the more permissive 'All except the list below' GUI parameter. CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_relay_ip_list ''' ret = list() setting = 'RelayIpList' lines = _get_wmi_setting('IIsSmtpServerSetting', setting, server) if not lines: _LOG.debug('%s is empty: %s', setting, lines) if lines is None: lines = [None] return list(lines) # WMI returns the addresses as a tuple of individual octets, so we # need to group them and reassemble them into IP addresses. 
i = 0 while i < len(lines): octets = [six.text_type(x) for x in lines[i: i + 4]] address = '.'.join(octets) ret.append(address) i += 4 return ret def set_relay_ip_list(addresses=None, server=_DEFAULT_SERVER): ''' Set the RelayIpList list for the SMTP virtual server. Due to the unusual way that Windows stores the relay IPs, it is advisable to retrieve the existing list you wish to set from a pre-configured server. For example, setting '127.0.0.1' as an allowed relay IP through the GUI would generate an actual relay IP list similar to the following: .. code-block:: cfg ['24.0.0.128', '32.0.0.128', '60.0.0.128', '68.0.0.128', '1.0.0.0', '76.0.0.0', '0.0.0.0', '0.0.0.0', '1.0.0.0', '1.0.0.0', '2.0.0.0', '2.0.0.0', '4.0.0.0', '0.0.0.0', '76.0.0.128', '0.0.0.0', '0.0.0.0', '0.0.0.0', '0.0.0.0', '255.255.255.255', '127.0.0.1'] .. note:: Setting the list to None corresponds to the restrictive 'Only the list below' GUI parameter with an empty access list configured, and setting an empty list/tuple corresponds to the more permissive 'All except the list below' GUI parameter. :param str addresses: A list of the relay IPs. The order of the list is important. :param str server: The SMTP server name. :return: A boolean representing whether the change succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_smtp_server.set_relay_ip_list addresses="['192.168.1.1', '172.16.1.1']" ''' setting = 'RelayIpList' formatted_addresses = list() current_addresses = get_relay_ip_list(server) if list(addresses) == current_addresses: _LOG.debug('%s already contains the provided addresses.', setting) return True if addresses: # The WMI input data needs to be in the format used by RelayIpList. Order # is also important due to the way RelayIpList orders the address list. 
if addresses[0] is None: formatted_addresses = None else: for address in addresses: for octet in address.split('.'): formatted_addresses.append(octet) _LOG.debug('Formatted %s addresses: %s', setting, formatted_addresses) _set_wmi_setting('IIsSmtpServerSetting', setting, formatted_addresses, server) new_addresses = get_relay_ip_list(server) ret = list(addresses) == new_addresses if ret: _LOG.debug('%s configured successfully: %s', setting, addresses) return ret _LOG.error('Unable to configure %s with value: %s', setting, addresses) return ret
saltstack/salt
salt/modules/win_smtp_server.py
get_servers
python
def get_servers():
    '''
    Get the SMTP virtual server names.

    :return: A list of the SMTP virtual servers.
    :rtype: list

    CLI Example:

    .. code-block:: bash

        salt '*' win_smtp_server.get_servers
    '''
    server_names = list()

    with salt.utils.winapi.Com():
        try:
            wmi_connection = wmi.WMI(namespace=_WMI_NAMESPACE)
            smtp_settings = wmi_connection.IIsSmtpServerSetting()

            # Each IIsSmtpServerSetting instance corresponds to one virtual
            # server; collect their names as text.
            for smtp_setting in smtp_settings:
                server_names.append(six.text_type(smtp_setting.Name))
        except wmi.x_wmi as error:
            _LOG.error('Encountered WMI error: %s', error.com_error)
        except (AttributeError, IndexError) as error:
            _LOG.error('Error getting IIsSmtpServerSetting: %s', error)

    _LOG.debug('Found SMTP servers: %s', server_names)
    return server_names
Get the SMTP virtual server names. :return: A list of the SMTP virtual servers. :rtype: list CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_servers
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_smtp_server.py#L154-L182
null
# -*- coding: utf-8 -*- ''' Module for managing IIS SMTP server configuration on Windows servers. The Windows features 'SMTP-Server' and 'Web-WMI' must be installed. :depends: wmi ''' # IIS metabase configuration settings: # https://goo.gl/XCt1uO # IIS logging options: # https://goo.gl/RL8ki9 # https://goo.gl/iwnDow # MicrosoftIISv2 namespace in Windows 2008r2 and later: # http://goo.gl/O4m48T # Connection and relay IPs in PowerShell: # https://goo.gl/aBMZ9K # http://goo.gl/MrybFq # Import python libs from __future__ import absolute_import, unicode_literals, print_function import logging import re # Import Salt libs from salt.exceptions import SaltInvocationError import salt.utils.args import salt.utils.platform # Import 3rd-party libs from salt.ext import six try: import wmi import salt.utils.winapi _HAS_MODULE_DEPENDENCIES = True except ImportError: _HAS_MODULE_DEPENDENCIES = False _DEFAULT_SERVER = 'SmtpSvc/1' _WMI_NAMESPACE = 'MicrosoftIISv2' _LOG = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'win_smtp_server' def __virtual__(): ''' Only works on Windows systems. ''' if salt.utils.platform.is_windows() and _HAS_MODULE_DEPENDENCIES: return __virtualname__ return False def _get_wmi_setting(wmi_class_name, setting, server): ''' Get the value of the setting for the provided class. ''' with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) wmi_class = getattr(connection, wmi_class_name) objs = wmi_class([setting], Name=server)[0] ret = getattr(objs, setting) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting %s: %s', wmi_class_name, error) return ret def _set_wmi_setting(wmi_class_name, setting, value, server): ''' Set the value of the setting for the provided class. 
''' with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) wmi_class = getattr(connection, wmi_class_name) objs = wmi_class(Name=server)[0] except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting %s: %s', wmi_class_name, error) try: setattr(objs, setting, value) return True except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except AttributeError as error: _LOG.error('Error setting %s: %s', setting, error) return False def _normalize_server_settings(**settings): ''' Convert setting values that had been improperly converted to a dict back to a string. ''' ret = dict() settings = salt.utils.args.clean_kwargs(**settings) for setting in settings: if isinstance(settings[setting], dict): _LOG.debug('Fixing value: %s', settings[setting]) value_from_key = next(six.iterkeys(settings[setting])) ret[setting] = "{{{0}}}".format(value_from_key) else: ret[setting] = settings[setting] return ret def get_log_format_types(): ''' Get all available log format names and ids. :return: A dictionary of the log format names and ids. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_log_format_types ''' ret = dict() prefix = 'logging/' with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) objs = connection.IISLogModuleSetting() # Remove the prefix from the name. for obj in objs: name = six.text_type(obj.Name).replace(prefix, '', 1) ret[name] = six.text_type(obj.LogModuleId) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting IISLogModuleSetting: %s', error) if not ret: _LOG.error('Unable to get log format types.') return ret def get_server_setting(settings, server=_DEFAULT_SERVER): ''' Get the value of the setting for the SMTP virtual server. 
:param str settings: A list of the setting names. :param str server: The SMTP server name. :return: A dictionary of the provided settings and their values. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_server_setting settings="['MaxRecipients']" ''' ret = dict() if not settings: _LOG.warning('No settings provided.') return ret with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) objs = connection.IIsSmtpServerSetting(settings, Name=server)[0] for setting in settings: ret[setting] = six.text_type(getattr(objs, setting)) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting IIsSmtpServerSetting: %s', error) return ret def set_server_setting(settings, server=_DEFAULT_SERVER): ''' Set the value of the setting for the SMTP virtual server. .. note:: The setting names are case-sensitive. :param str settings: A dictionary of the setting names and their values. :param str server: The SMTP server name. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_smtp_server.set_server_setting settings="{'MaxRecipients': '500'}" ''' if not settings: _LOG.warning('No settings provided') return False # Some fields are formatted like '{data}'. Salt tries to convert these to dicts # automatically on input, so convert them back to the proper format. 
settings = _normalize_server_settings(**settings) current_settings = get_server_setting(settings=settings.keys(), server=server) if settings == current_settings: _LOG.debug('Settings already contain the provided values.') return True # Note that we must fetch all properties of IIsSmtpServerSetting below, since # filtering for specific properties and then attempting to set them will cause # an error like: wmi.x_wmi Unexpected COM Error -2147352567 with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) objs = connection.IIsSmtpServerSetting(Name=server)[0] except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting IIsSmtpServerSetting: %s', error) for setting in settings: if six.text_type(settings[setting]) != six.text_type(current_settings[setting]): try: setattr(objs, setting, settings[setting]) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except AttributeError as error: _LOG.error('Error setting %s: %s', setting, error) # Get the settings post-change so that we can verify tht all properties # were modified successfully. Track the ones that weren't. new_settings = get_server_setting(settings=settings.keys(), server=server) failed_settings = dict() for setting in settings: if six.text_type(settings[setting]) != six.text_type(new_settings[setting]): failed_settings[setting] = settings[setting] if failed_settings: _LOG.error('Failed to change settings: %s', failed_settings) return False _LOG.debug('Settings configured successfully: %s', settings.keys()) return True def get_log_format(server=_DEFAULT_SERVER): ''' Get the active log format for the SMTP virtual server. :param str server: The SMTP server name. :return: A string of the log format name. :rtype: str CLI Example: .. 
code-block:: bash salt '*' win_smtp_server.get_log_format ''' log_format_types = get_log_format_types() format_id = _get_wmi_setting('IIsSmtpServerSetting', 'LogPluginClsid', server) # Since IIsSmtpServerSetting stores the log type as an id, we need # to get the mapping from IISLogModuleSetting and extract the name. for key in log_format_types: if six.text_type(format_id) == log_format_types[key]: return key _LOG.warning('Unable to determine log format.') return None def set_log_format(log_format, server=_DEFAULT_SERVER): ''' Set the active log format for the SMTP virtual server. :param str log_format: The log format name. :param str server: The SMTP server name. :return: A boolean representing whether the change succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_smtp_server.set_log_format 'Microsoft IIS Log File Format' ''' setting = 'LogPluginClsid' log_format_types = get_log_format_types() format_id = log_format_types.get(log_format, None) if not format_id: message = ("Invalid log format '{0}' specified. Valid formats:" ' {1}').format(log_format, log_format_types.keys()) raise SaltInvocationError(message) _LOG.debug("Id for '%s' found: %s", log_format, format_id) current_log_format = get_log_format(server) if log_format == current_log_format: _LOG.debug('%s already contains the provided format.', setting) return True _set_wmi_setting('IIsSmtpServerSetting', setting, format_id, server) new_log_format = get_log_format(server) ret = log_format == new_log_format if ret: _LOG.debug("Setting %s configured successfully: %s", setting, log_format) else: _LOG.error("Unable to configure %s with value: %s", setting, log_format) return ret def get_connection_ip_list(as_wmi_format=False, server=_DEFAULT_SERVER): ''' Get the IPGrant list for the SMTP virtual server. :param bool as_wmi_format: Returns the connection IPs as a list in the format WMI expects. :param str server: The SMTP server name. :return: A dictionary of the IP and subnet pairs. 
:rtype: dict CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_connection_ip_list ''' ret = dict() setting = 'IPGrant' reg_separator = r',\s*' if as_wmi_format: ret = list() addresses = _get_wmi_setting('IIsIPSecuritySetting', setting, server) # WMI returns the addresses as a tuple of unicode strings, each representing # an address/subnet pair. Remove extra spaces that may be present. for unnormalized_address in addresses: ip_address, subnet = re.split(reg_separator, unnormalized_address) if as_wmi_format: ret.append('{0}, {1}'.format(ip_address, subnet)) else: ret[ip_address] = subnet if not ret: _LOG.debug('%s is empty.', setting) return ret def set_connection_ip_list(addresses=None, grant_by_default=False, server=_DEFAULT_SERVER): ''' Set the IPGrant list for the SMTP virtual server. :param str addresses: A dictionary of IP + subnet pairs. :param bool grant_by_default: Whether the addresses should be a blacklist or whitelist. :param str server: The SMTP server name. :return: A boolean representing whether the change succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_smtp_server.set_connection_ip_list addresses="{'127.0.0.1': '255.255.255.255'}" ''' setting = 'IPGrant' formatted_addresses = list() # It's okay to accept an empty list for set_connection_ip_list, # since an empty list may be desirable. if not addresses: addresses = dict() _LOG.debug('Empty %s specified.', setting) # Convert addresses to the 'ip_address, subnet' format used by # IIsIPSecuritySetting. for address in addresses: formatted_addresses.append('{0}, {1}'.format(address.strip(), addresses[address].strip())) current_addresses = get_connection_ip_list(as_wmi_format=True, server=server) # Order is not important, so compare to the current addresses as unordered sets. 
if set(formatted_addresses) == set(current_addresses): _LOG.debug('%s already contains the provided addresses.', setting) return True # First we should check GrantByDefault, and change it if necessary. current_grant_by_default = _get_wmi_setting('IIsIPSecuritySetting', 'GrantByDefault', server) if grant_by_default != current_grant_by_default: _LOG.debug('Setting GrantByDefault to: %s', grant_by_default) _set_wmi_setting('IIsIPSecuritySetting', 'GrantByDefault', grant_by_default, server) _set_wmi_setting('IIsIPSecuritySetting', setting, formatted_addresses, server) new_addresses = get_connection_ip_list(as_wmi_format=True, server=server) ret = set(formatted_addresses) == set(new_addresses) if ret: _LOG.debug('%s configured successfully: %s', setting, formatted_addresses) return ret _LOG.error('Unable to configure %s with value: %s', setting, formatted_addresses) return ret def get_relay_ip_list(server=_DEFAULT_SERVER): ''' Get the RelayIpList list for the SMTP virtual server. :param str server: The SMTP server name. :return: A list of the relay IPs. :rtype: list .. note:: A return value of None corresponds to the restrictive 'Only the list below' GUI parameter with an empty access list, and setting an empty list/tuple corresponds to the more permissive 'All except the list below' GUI parameter. CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_relay_ip_list ''' ret = list() setting = 'RelayIpList' lines = _get_wmi_setting('IIsSmtpServerSetting', setting, server) if not lines: _LOG.debug('%s is empty: %s', setting, lines) if lines is None: lines = [None] return list(lines) # WMI returns the addresses as a tuple of individual octets, so we # need to group them and reassemble them into IP addresses. 
i = 0 while i < len(lines): octets = [six.text_type(x) for x in lines[i: i + 4]] address = '.'.join(octets) ret.append(address) i += 4 return ret def set_relay_ip_list(addresses=None, server=_DEFAULT_SERVER): ''' Set the RelayIpList list for the SMTP virtual server. Due to the unusual way that Windows stores the relay IPs, it is advisable to retrieve the existing list you wish to set from a pre-configured server. For example, setting '127.0.0.1' as an allowed relay IP through the GUI would generate an actual relay IP list similar to the following: .. code-block:: cfg ['24.0.0.128', '32.0.0.128', '60.0.0.128', '68.0.0.128', '1.0.0.0', '76.0.0.0', '0.0.0.0', '0.0.0.0', '1.0.0.0', '1.0.0.0', '2.0.0.0', '2.0.0.0', '4.0.0.0', '0.0.0.0', '76.0.0.128', '0.0.0.0', '0.0.0.0', '0.0.0.0', '0.0.0.0', '255.255.255.255', '127.0.0.1'] .. note:: Setting the list to None corresponds to the restrictive 'Only the list below' GUI parameter with an empty access list configured, and setting an empty list/tuple corresponds to the more permissive 'All except the list below' GUI parameter. :param str addresses: A list of the relay IPs. The order of the list is important. :param str server: The SMTP server name. :return: A boolean representing whether the change succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_smtp_server.set_relay_ip_list addresses="['192.168.1.1', '172.16.1.1']" ''' setting = 'RelayIpList' formatted_addresses = list() current_addresses = get_relay_ip_list(server) if list(addresses) == current_addresses: _LOG.debug('%s already contains the provided addresses.', setting) return True if addresses: # The WMI input data needs to be in the format used by RelayIpList. Order # is also important due to the way RelayIpList orders the address list. 
if addresses[0] is None: formatted_addresses = None else: for address in addresses: for octet in address.split('.'): formatted_addresses.append(octet) _LOG.debug('Formatted %s addresses: %s', setting, formatted_addresses) _set_wmi_setting('IIsSmtpServerSetting', setting, formatted_addresses, server) new_addresses = get_relay_ip_list(server) ret = list(addresses) == new_addresses if ret: _LOG.debug('%s configured successfully: %s', setting, addresses) return ret _LOG.error('Unable to configure %s with value: %s', setting, addresses) return ret
saltstack/salt
salt/modules/win_smtp_server.py
get_server_setting
python
def get_server_setting(settings, server=_DEFAULT_SERVER): ''' Get the value of the setting for the SMTP virtual server. :param str settings: A list of the setting names. :param str server: The SMTP server name. :return: A dictionary of the provided settings and their values. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_server_setting settings="['MaxRecipients']" ''' ret = dict() if not settings: _LOG.warning('No settings provided.') return ret with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) objs = connection.IIsSmtpServerSetting(settings, Name=server)[0] for setting in settings: ret[setting] = six.text_type(getattr(objs, setting)) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting IIsSmtpServerSetting: %s', error) return ret
Get the value of the setting for the SMTP virtual server. :param str settings: A list of the setting names. :param str server: The SMTP server name. :return: A dictionary of the provided settings and their values. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_server_setting settings="['MaxRecipients']"
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_smtp_server.py#L185-L218
null
# -*- coding: utf-8 -*- ''' Module for managing IIS SMTP server configuration on Windows servers. The Windows features 'SMTP-Server' and 'Web-WMI' must be installed. :depends: wmi ''' # IIS metabase configuration settings: # https://goo.gl/XCt1uO # IIS logging options: # https://goo.gl/RL8ki9 # https://goo.gl/iwnDow # MicrosoftIISv2 namespace in Windows 2008r2 and later: # http://goo.gl/O4m48T # Connection and relay IPs in PowerShell: # https://goo.gl/aBMZ9K # http://goo.gl/MrybFq # Import python libs from __future__ import absolute_import, unicode_literals, print_function import logging import re # Import Salt libs from salt.exceptions import SaltInvocationError import salt.utils.args import salt.utils.platform # Import 3rd-party libs from salt.ext import six try: import wmi import salt.utils.winapi _HAS_MODULE_DEPENDENCIES = True except ImportError: _HAS_MODULE_DEPENDENCIES = False _DEFAULT_SERVER = 'SmtpSvc/1' _WMI_NAMESPACE = 'MicrosoftIISv2' _LOG = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'win_smtp_server' def __virtual__(): ''' Only works on Windows systems. ''' if salt.utils.platform.is_windows() and _HAS_MODULE_DEPENDENCIES: return __virtualname__ return False def _get_wmi_setting(wmi_class_name, setting, server): ''' Get the value of the setting for the provided class. ''' with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) wmi_class = getattr(connection, wmi_class_name) objs = wmi_class([setting], Name=server)[0] ret = getattr(objs, setting) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting %s: %s', wmi_class_name, error) return ret def _set_wmi_setting(wmi_class_name, setting, value, server): ''' Set the value of the setting for the provided class. 
''' with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) wmi_class = getattr(connection, wmi_class_name) objs = wmi_class(Name=server)[0] except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting %s: %s', wmi_class_name, error) try: setattr(objs, setting, value) return True except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except AttributeError as error: _LOG.error('Error setting %s: %s', setting, error) return False def _normalize_server_settings(**settings): ''' Convert setting values that had been improperly converted to a dict back to a string. ''' ret = dict() settings = salt.utils.args.clean_kwargs(**settings) for setting in settings: if isinstance(settings[setting], dict): _LOG.debug('Fixing value: %s', settings[setting]) value_from_key = next(six.iterkeys(settings[setting])) ret[setting] = "{{{0}}}".format(value_from_key) else: ret[setting] = settings[setting] return ret def get_log_format_types(): ''' Get all available log format names and ids. :return: A dictionary of the log format names and ids. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_log_format_types ''' ret = dict() prefix = 'logging/' with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) objs = connection.IISLogModuleSetting() # Remove the prefix from the name. for obj in objs: name = six.text_type(obj.Name).replace(prefix, '', 1) ret[name] = six.text_type(obj.LogModuleId) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting IISLogModuleSetting: %s', error) if not ret: _LOG.error('Unable to get log format types.') return ret def get_servers(): ''' Get the SMTP virtual server names. :return: A list of the SMTP virtual servers. :rtype: list CLI Example: .. 
code-block:: bash salt '*' win_smtp_server.get_servers ''' ret = list() with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) objs = connection.IIsSmtpServerSetting() for obj in objs: ret.append(six.text_type(obj.Name)) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting IIsSmtpServerSetting: %s', error) _LOG.debug('Found SMTP servers: %s', ret) return ret def set_server_setting(settings, server=_DEFAULT_SERVER): ''' Set the value of the setting for the SMTP virtual server. .. note:: The setting names are case-sensitive. :param str settings: A dictionary of the setting names and their values. :param str server: The SMTP server name. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_smtp_server.set_server_setting settings="{'MaxRecipients': '500'}" ''' if not settings: _LOG.warning('No settings provided') return False # Some fields are formatted like '{data}'. Salt tries to convert these to dicts # automatically on input, so convert them back to the proper format. 
settings = _normalize_server_settings(**settings) current_settings = get_server_setting(settings=settings.keys(), server=server) if settings == current_settings: _LOG.debug('Settings already contain the provided values.') return True # Note that we must fetch all properties of IIsSmtpServerSetting below, since # filtering for specific properties and then attempting to set them will cause # an error like: wmi.x_wmi Unexpected COM Error -2147352567 with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) objs = connection.IIsSmtpServerSetting(Name=server)[0] except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting IIsSmtpServerSetting: %s', error) for setting in settings: if six.text_type(settings[setting]) != six.text_type(current_settings[setting]): try: setattr(objs, setting, settings[setting]) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except AttributeError as error: _LOG.error('Error setting %s: %s', setting, error) # Get the settings post-change so that we can verify tht all properties # were modified successfully. Track the ones that weren't. new_settings = get_server_setting(settings=settings.keys(), server=server) failed_settings = dict() for setting in settings: if six.text_type(settings[setting]) != six.text_type(new_settings[setting]): failed_settings[setting] = settings[setting] if failed_settings: _LOG.error('Failed to change settings: %s', failed_settings) return False _LOG.debug('Settings configured successfully: %s', settings.keys()) return True def get_log_format(server=_DEFAULT_SERVER): ''' Get the active log format for the SMTP virtual server. :param str server: The SMTP server name. :return: A string of the log format name. :rtype: str CLI Example: .. 
code-block:: bash salt '*' win_smtp_server.get_log_format ''' log_format_types = get_log_format_types() format_id = _get_wmi_setting('IIsSmtpServerSetting', 'LogPluginClsid', server) # Since IIsSmtpServerSetting stores the log type as an id, we need # to get the mapping from IISLogModuleSetting and extract the name. for key in log_format_types: if six.text_type(format_id) == log_format_types[key]: return key _LOG.warning('Unable to determine log format.') return None def set_log_format(log_format, server=_DEFAULT_SERVER): ''' Set the active log format for the SMTP virtual server. :param str log_format: The log format name. :param str server: The SMTP server name. :return: A boolean representing whether the change succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_smtp_server.set_log_format 'Microsoft IIS Log File Format' ''' setting = 'LogPluginClsid' log_format_types = get_log_format_types() format_id = log_format_types.get(log_format, None) if not format_id: message = ("Invalid log format '{0}' specified. Valid formats:" ' {1}').format(log_format, log_format_types.keys()) raise SaltInvocationError(message) _LOG.debug("Id for '%s' found: %s", log_format, format_id) current_log_format = get_log_format(server) if log_format == current_log_format: _LOG.debug('%s already contains the provided format.', setting) return True _set_wmi_setting('IIsSmtpServerSetting', setting, format_id, server) new_log_format = get_log_format(server) ret = log_format == new_log_format if ret: _LOG.debug("Setting %s configured successfully: %s", setting, log_format) else: _LOG.error("Unable to configure %s with value: %s", setting, log_format) return ret def get_connection_ip_list(as_wmi_format=False, server=_DEFAULT_SERVER): ''' Get the IPGrant list for the SMTP virtual server. :param bool as_wmi_format: Returns the connection IPs as a list in the format WMI expects. :param str server: The SMTP server name. :return: A dictionary of the IP and subnet pairs. 
:rtype: dict CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_connection_ip_list ''' ret = dict() setting = 'IPGrant' reg_separator = r',\s*' if as_wmi_format: ret = list() addresses = _get_wmi_setting('IIsIPSecuritySetting', setting, server) # WMI returns the addresses as a tuple of unicode strings, each representing # an address/subnet pair. Remove extra spaces that may be present. for unnormalized_address in addresses: ip_address, subnet = re.split(reg_separator, unnormalized_address) if as_wmi_format: ret.append('{0}, {1}'.format(ip_address, subnet)) else: ret[ip_address] = subnet if not ret: _LOG.debug('%s is empty.', setting) return ret def set_connection_ip_list(addresses=None, grant_by_default=False, server=_DEFAULT_SERVER): ''' Set the IPGrant list for the SMTP virtual server. :param str addresses: A dictionary of IP + subnet pairs. :param bool grant_by_default: Whether the addresses should be a blacklist or whitelist. :param str server: The SMTP server name. :return: A boolean representing whether the change succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_smtp_server.set_connection_ip_list addresses="{'127.0.0.1': '255.255.255.255'}" ''' setting = 'IPGrant' formatted_addresses = list() # It's okay to accept an empty list for set_connection_ip_list, # since an empty list may be desirable. if not addresses: addresses = dict() _LOG.debug('Empty %s specified.', setting) # Convert addresses to the 'ip_address, subnet' format used by # IIsIPSecuritySetting. for address in addresses: formatted_addresses.append('{0}, {1}'.format(address.strip(), addresses[address].strip())) current_addresses = get_connection_ip_list(as_wmi_format=True, server=server) # Order is not important, so compare to the current addresses as unordered sets. 
if set(formatted_addresses) == set(current_addresses): _LOG.debug('%s already contains the provided addresses.', setting) return True # First we should check GrantByDefault, and change it if necessary. current_grant_by_default = _get_wmi_setting('IIsIPSecuritySetting', 'GrantByDefault', server) if grant_by_default != current_grant_by_default: _LOG.debug('Setting GrantByDefault to: %s', grant_by_default) _set_wmi_setting('IIsIPSecuritySetting', 'GrantByDefault', grant_by_default, server) _set_wmi_setting('IIsIPSecuritySetting', setting, formatted_addresses, server) new_addresses = get_connection_ip_list(as_wmi_format=True, server=server) ret = set(formatted_addresses) == set(new_addresses) if ret: _LOG.debug('%s configured successfully: %s', setting, formatted_addresses) return ret _LOG.error('Unable to configure %s with value: %s', setting, formatted_addresses) return ret def get_relay_ip_list(server=_DEFAULT_SERVER): ''' Get the RelayIpList list for the SMTP virtual server. :param str server: The SMTP server name. :return: A list of the relay IPs. :rtype: list .. note:: A return value of None corresponds to the restrictive 'Only the list below' GUI parameter with an empty access list, and setting an empty list/tuple corresponds to the more permissive 'All except the list below' GUI parameter. CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_relay_ip_list ''' ret = list() setting = 'RelayIpList' lines = _get_wmi_setting('IIsSmtpServerSetting', setting, server) if not lines: _LOG.debug('%s is empty: %s', setting, lines) if lines is None: lines = [None] return list(lines) # WMI returns the addresses as a tuple of individual octets, so we # need to group them and reassemble them into IP addresses. 
i = 0 while i < len(lines): octets = [six.text_type(x) for x in lines[i: i + 4]] address = '.'.join(octets) ret.append(address) i += 4 return ret def set_relay_ip_list(addresses=None, server=_DEFAULT_SERVER): ''' Set the RelayIpList list for the SMTP virtual server. Due to the unusual way that Windows stores the relay IPs, it is advisable to retrieve the existing list you wish to set from a pre-configured server. For example, setting '127.0.0.1' as an allowed relay IP through the GUI would generate an actual relay IP list similar to the following: .. code-block:: cfg ['24.0.0.128', '32.0.0.128', '60.0.0.128', '68.0.0.128', '1.0.0.0', '76.0.0.0', '0.0.0.0', '0.0.0.0', '1.0.0.0', '1.0.0.0', '2.0.0.0', '2.0.0.0', '4.0.0.0', '0.0.0.0', '76.0.0.128', '0.0.0.0', '0.0.0.0', '0.0.0.0', '0.0.0.0', '255.255.255.255', '127.0.0.1'] .. note:: Setting the list to None corresponds to the restrictive 'Only the list below' GUI parameter with an empty access list configured, and setting an empty list/tuple corresponds to the more permissive 'All except the list below' GUI parameter. :param str addresses: A list of the relay IPs. The order of the list is important. :param str server: The SMTP server name. :return: A boolean representing whether the change succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_smtp_server.set_relay_ip_list addresses="['192.168.1.1', '172.16.1.1']" ''' setting = 'RelayIpList' formatted_addresses = list() current_addresses = get_relay_ip_list(server) if list(addresses) == current_addresses: _LOG.debug('%s already contains the provided addresses.', setting) return True if addresses: # The WMI input data needs to be in the format used by RelayIpList. Order # is also important due to the way RelayIpList orders the address list. 
if addresses[0] is None: formatted_addresses = None else: for address in addresses: for octet in address.split('.'): formatted_addresses.append(octet) _LOG.debug('Formatted %s addresses: %s', setting, formatted_addresses) _set_wmi_setting('IIsSmtpServerSetting', setting, formatted_addresses, server) new_addresses = get_relay_ip_list(server) ret = list(addresses) == new_addresses if ret: _LOG.debug('%s configured successfully: %s', setting, addresses) return ret _LOG.error('Unable to configure %s with value: %s', setting, addresses) return ret
saltstack/salt
salt/modules/win_smtp_server.py
set_server_setting
python
def set_server_setting(settings, server=_DEFAULT_SERVER): ''' Set the value of the setting for the SMTP virtual server. .. note:: The setting names are case-sensitive. :param str settings: A dictionary of the setting names and their values. :param str server: The SMTP server name. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_smtp_server.set_server_setting settings="{'MaxRecipients': '500'}" ''' if not settings: _LOG.warning('No settings provided') return False # Some fields are formatted like '{data}'. Salt tries to convert these to dicts # automatically on input, so convert them back to the proper format. settings = _normalize_server_settings(**settings) current_settings = get_server_setting(settings=settings.keys(), server=server) if settings == current_settings: _LOG.debug('Settings already contain the provided values.') return True # Note that we must fetch all properties of IIsSmtpServerSetting below, since # filtering for specific properties and then attempting to set them will cause # an error like: wmi.x_wmi Unexpected COM Error -2147352567 with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) objs = connection.IIsSmtpServerSetting(Name=server)[0] except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting IIsSmtpServerSetting: %s', error) for setting in settings: if six.text_type(settings[setting]) != six.text_type(current_settings[setting]): try: setattr(objs, setting, settings[setting]) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except AttributeError as error: _LOG.error('Error setting %s: %s', setting, error) # Get the settings post-change so that we can verify tht all properties # were modified successfully. Track the ones that weren't. 
new_settings = get_server_setting(settings=settings.keys(), server=server) failed_settings = dict() for setting in settings: if six.text_type(settings[setting]) != six.text_type(new_settings[setting]): failed_settings[setting] = settings[setting] if failed_settings: _LOG.error('Failed to change settings: %s', failed_settings) return False _LOG.debug('Settings configured successfully: %s', settings.keys()) return True
Set the value of the setting for the SMTP virtual server. .. note:: The setting names are case-sensitive. :param str settings: A dictionary of the setting names and their values. :param str server: The SMTP server name. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_smtp_server.set_server_setting settings="{'MaxRecipients': '500'}"
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_smtp_server.py#L221-L289
[ "def _normalize_server_settings(**settings):\n '''\n Convert setting values that had been improperly converted to a dict back to a string.\n '''\n ret = dict()\n settings = salt.utils.args.clean_kwargs(**settings)\n\n for setting in settings:\n if isinstance(settings[setting], dict):\n _LOG.debug('Fixing value: %s', settings[setting])\n value_from_key = next(six.iterkeys(settings[setting]))\n\n ret[setting] = \"{{{0}}}\".format(value_from_key)\n else:\n ret[setting] = settings[setting]\n return ret\n", "def get_server_setting(settings, server=_DEFAULT_SERVER):\n '''\n Get the value of the setting for the SMTP virtual server.\n\n :param str settings: A list of the setting names.\n :param str server: The SMTP server name.\n\n :return: A dictionary of the provided settings and their values.\n :rtype: dict\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' win_smtp_server.get_server_setting settings=\"['MaxRecipients']\"\n '''\n ret = dict()\n\n if not settings:\n _LOG.warning('No settings provided.')\n return ret\n\n with salt.utils.winapi.Com():\n try:\n connection = wmi.WMI(namespace=_WMI_NAMESPACE)\n objs = connection.IIsSmtpServerSetting(settings, Name=server)[0]\n\n for setting in settings:\n ret[setting] = six.text_type(getattr(objs, setting))\n except wmi.x_wmi as error:\n _LOG.error('Encountered WMI error: %s', error.com_error)\n except (AttributeError, IndexError) as error:\n _LOG.error('Error getting IIsSmtpServerSetting: %s', error)\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Module for managing IIS SMTP server configuration on Windows servers. The Windows features 'SMTP-Server' and 'Web-WMI' must be installed. :depends: wmi ''' # IIS metabase configuration settings: # https://goo.gl/XCt1uO # IIS logging options: # https://goo.gl/RL8ki9 # https://goo.gl/iwnDow # MicrosoftIISv2 namespace in Windows 2008r2 and later: # http://goo.gl/O4m48T # Connection and relay IPs in PowerShell: # https://goo.gl/aBMZ9K # http://goo.gl/MrybFq # Import python libs from __future__ import absolute_import, unicode_literals, print_function import logging import re # Import Salt libs from salt.exceptions import SaltInvocationError import salt.utils.args import salt.utils.platform # Import 3rd-party libs from salt.ext import six try: import wmi import salt.utils.winapi _HAS_MODULE_DEPENDENCIES = True except ImportError: _HAS_MODULE_DEPENDENCIES = False _DEFAULT_SERVER = 'SmtpSvc/1' _WMI_NAMESPACE = 'MicrosoftIISv2' _LOG = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'win_smtp_server' def __virtual__(): ''' Only works on Windows systems. ''' if salt.utils.platform.is_windows() and _HAS_MODULE_DEPENDENCIES: return __virtualname__ return False def _get_wmi_setting(wmi_class_name, setting, server): ''' Get the value of the setting for the provided class. ''' with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) wmi_class = getattr(connection, wmi_class_name) objs = wmi_class([setting], Name=server)[0] ret = getattr(objs, setting) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting %s: %s', wmi_class_name, error) return ret def _set_wmi_setting(wmi_class_name, setting, value, server): ''' Set the value of the setting for the provided class. 
''' with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) wmi_class = getattr(connection, wmi_class_name) objs = wmi_class(Name=server)[0] except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting %s: %s', wmi_class_name, error) try: setattr(objs, setting, value) return True except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except AttributeError as error: _LOG.error('Error setting %s: %s', setting, error) return False def _normalize_server_settings(**settings): ''' Convert setting values that had been improperly converted to a dict back to a string. ''' ret = dict() settings = salt.utils.args.clean_kwargs(**settings) for setting in settings: if isinstance(settings[setting], dict): _LOG.debug('Fixing value: %s', settings[setting]) value_from_key = next(six.iterkeys(settings[setting])) ret[setting] = "{{{0}}}".format(value_from_key) else: ret[setting] = settings[setting] return ret def get_log_format_types(): ''' Get all available log format names and ids. :return: A dictionary of the log format names and ids. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_log_format_types ''' ret = dict() prefix = 'logging/' with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) objs = connection.IISLogModuleSetting() # Remove the prefix from the name. for obj in objs: name = six.text_type(obj.Name).replace(prefix, '', 1) ret[name] = six.text_type(obj.LogModuleId) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting IISLogModuleSetting: %s', error) if not ret: _LOG.error('Unable to get log format types.') return ret def get_servers(): ''' Get the SMTP virtual server names. :return: A list of the SMTP virtual servers. :rtype: list CLI Example: .. 
code-block:: bash salt '*' win_smtp_server.get_servers ''' ret = list() with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) objs = connection.IIsSmtpServerSetting() for obj in objs: ret.append(six.text_type(obj.Name)) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting IIsSmtpServerSetting: %s', error) _LOG.debug('Found SMTP servers: %s', ret) return ret def get_server_setting(settings, server=_DEFAULT_SERVER): ''' Get the value of the setting for the SMTP virtual server. :param str settings: A list of the setting names. :param str server: The SMTP server name. :return: A dictionary of the provided settings and their values. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_server_setting settings="['MaxRecipients']" ''' ret = dict() if not settings: _LOG.warning('No settings provided.') return ret with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) objs = connection.IIsSmtpServerSetting(settings, Name=server)[0] for setting in settings: ret[setting] = six.text_type(getattr(objs, setting)) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting IIsSmtpServerSetting: %s', error) return ret def get_log_format(server=_DEFAULT_SERVER): ''' Get the active log format for the SMTP virtual server. :param str server: The SMTP server name. :return: A string of the log format name. :rtype: str CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_log_format ''' log_format_types = get_log_format_types() format_id = _get_wmi_setting('IIsSmtpServerSetting', 'LogPluginClsid', server) # Since IIsSmtpServerSetting stores the log type as an id, we need # to get the mapping from IISLogModuleSetting and extract the name. 
for key in log_format_types: if six.text_type(format_id) == log_format_types[key]: return key _LOG.warning('Unable to determine log format.') return None def set_log_format(log_format, server=_DEFAULT_SERVER): ''' Set the active log format for the SMTP virtual server. :param str log_format: The log format name. :param str server: The SMTP server name. :return: A boolean representing whether the change succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_smtp_server.set_log_format 'Microsoft IIS Log File Format' ''' setting = 'LogPluginClsid' log_format_types = get_log_format_types() format_id = log_format_types.get(log_format, None) if not format_id: message = ("Invalid log format '{0}' specified. Valid formats:" ' {1}').format(log_format, log_format_types.keys()) raise SaltInvocationError(message) _LOG.debug("Id for '%s' found: %s", log_format, format_id) current_log_format = get_log_format(server) if log_format == current_log_format: _LOG.debug('%s already contains the provided format.', setting) return True _set_wmi_setting('IIsSmtpServerSetting', setting, format_id, server) new_log_format = get_log_format(server) ret = log_format == new_log_format if ret: _LOG.debug("Setting %s configured successfully: %s", setting, log_format) else: _LOG.error("Unable to configure %s with value: %s", setting, log_format) return ret def get_connection_ip_list(as_wmi_format=False, server=_DEFAULT_SERVER): ''' Get the IPGrant list for the SMTP virtual server. :param bool as_wmi_format: Returns the connection IPs as a list in the format WMI expects. :param str server: The SMTP server name. :return: A dictionary of the IP and subnet pairs. :rtype: dict CLI Example: .. 
code-block:: bash salt '*' win_smtp_server.get_connection_ip_list ''' ret = dict() setting = 'IPGrant' reg_separator = r',\s*' if as_wmi_format: ret = list() addresses = _get_wmi_setting('IIsIPSecuritySetting', setting, server) # WMI returns the addresses as a tuple of unicode strings, each representing # an address/subnet pair. Remove extra spaces that may be present. for unnormalized_address in addresses: ip_address, subnet = re.split(reg_separator, unnormalized_address) if as_wmi_format: ret.append('{0}, {1}'.format(ip_address, subnet)) else: ret[ip_address] = subnet if not ret: _LOG.debug('%s is empty.', setting) return ret def set_connection_ip_list(addresses=None, grant_by_default=False, server=_DEFAULT_SERVER): ''' Set the IPGrant list for the SMTP virtual server. :param str addresses: A dictionary of IP + subnet pairs. :param bool grant_by_default: Whether the addresses should be a blacklist or whitelist. :param str server: The SMTP server name. :return: A boolean representing whether the change succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_smtp_server.set_connection_ip_list addresses="{'127.0.0.1': '255.255.255.255'}" ''' setting = 'IPGrant' formatted_addresses = list() # It's okay to accept an empty list for set_connection_ip_list, # since an empty list may be desirable. if not addresses: addresses = dict() _LOG.debug('Empty %s specified.', setting) # Convert addresses to the 'ip_address, subnet' format used by # IIsIPSecuritySetting. for address in addresses: formatted_addresses.append('{0}, {1}'.format(address.strip(), addresses[address].strip())) current_addresses = get_connection_ip_list(as_wmi_format=True, server=server) # Order is not important, so compare to the current addresses as unordered sets. if set(formatted_addresses) == set(current_addresses): _LOG.debug('%s already contains the provided addresses.', setting) return True # First we should check GrantByDefault, and change it if necessary. 
current_grant_by_default = _get_wmi_setting('IIsIPSecuritySetting', 'GrantByDefault', server) if grant_by_default != current_grant_by_default: _LOG.debug('Setting GrantByDefault to: %s', grant_by_default) _set_wmi_setting('IIsIPSecuritySetting', 'GrantByDefault', grant_by_default, server) _set_wmi_setting('IIsIPSecuritySetting', setting, formatted_addresses, server) new_addresses = get_connection_ip_list(as_wmi_format=True, server=server) ret = set(formatted_addresses) == set(new_addresses) if ret: _LOG.debug('%s configured successfully: %s', setting, formatted_addresses) return ret _LOG.error('Unable to configure %s with value: %s', setting, formatted_addresses) return ret def get_relay_ip_list(server=_DEFAULT_SERVER): ''' Get the RelayIpList list for the SMTP virtual server. :param str server: The SMTP server name. :return: A list of the relay IPs. :rtype: list .. note:: A return value of None corresponds to the restrictive 'Only the list below' GUI parameter with an empty access list, and setting an empty list/tuple corresponds to the more permissive 'All except the list below' GUI parameter. CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_relay_ip_list ''' ret = list() setting = 'RelayIpList' lines = _get_wmi_setting('IIsSmtpServerSetting', setting, server) if not lines: _LOG.debug('%s is empty: %s', setting, lines) if lines is None: lines = [None] return list(lines) # WMI returns the addresses as a tuple of individual octets, so we # need to group them and reassemble them into IP addresses. i = 0 while i < len(lines): octets = [six.text_type(x) for x in lines[i: i + 4]] address = '.'.join(octets) ret.append(address) i += 4 return ret def set_relay_ip_list(addresses=None, server=_DEFAULT_SERVER): ''' Set the RelayIpList list for the SMTP virtual server. Due to the unusual way that Windows stores the relay IPs, it is advisable to retrieve the existing list you wish to set from a pre-configured server. 
For example, setting '127.0.0.1' as an allowed relay IP through the GUI would generate an actual relay IP list similar to the following: .. code-block:: cfg ['24.0.0.128', '32.0.0.128', '60.0.0.128', '68.0.0.128', '1.0.0.0', '76.0.0.0', '0.0.0.0', '0.0.0.0', '1.0.0.0', '1.0.0.0', '2.0.0.0', '2.0.0.0', '4.0.0.0', '0.0.0.0', '76.0.0.128', '0.0.0.0', '0.0.0.0', '0.0.0.0', '0.0.0.0', '255.255.255.255', '127.0.0.1'] .. note:: Setting the list to None corresponds to the restrictive 'Only the list below' GUI parameter with an empty access list configured, and setting an empty list/tuple corresponds to the more permissive 'All except the list below' GUI parameter. :param str addresses: A list of the relay IPs. The order of the list is important. :param str server: The SMTP server name. :return: A boolean representing whether the change succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_smtp_server.set_relay_ip_list addresses="['192.168.1.1', '172.16.1.1']" ''' setting = 'RelayIpList' formatted_addresses = list() current_addresses = get_relay_ip_list(server) if list(addresses) == current_addresses: _LOG.debug('%s already contains the provided addresses.', setting) return True if addresses: # The WMI input data needs to be in the format used by RelayIpList. Order # is also important due to the way RelayIpList orders the address list. if addresses[0] is None: formatted_addresses = None else: for address in addresses: for octet in address.split('.'): formatted_addresses.append(octet) _LOG.debug('Formatted %s addresses: %s', setting, formatted_addresses) _set_wmi_setting('IIsSmtpServerSetting', setting, formatted_addresses, server) new_addresses = get_relay_ip_list(server) ret = list(addresses) == new_addresses if ret: _LOG.debug('%s configured successfully: %s', setting, addresses) return ret _LOG.error('Unable to configure %s with value: %s', setting, addresses) return ret
saltstack/salt
salt/modules/win_smtp_server.py
get_log_format
python
def get_log_format(server=_DEFAULT_SERVER): ''' Get the active log format for the SMTP virtual server. :param str server: The SMTP server name. :return: A string of the log format name. :rtype: str CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_log_format ''' log_format_types = get_log_format_types() format_id = _get_wmi_setting('IIsSmtpServerSetting', 'LogPluginClsid', server) # Since IIsSmtpServerSetting stores the log type as an id, we need # to get the mapping from IISLogModuleSetting and extract the name. for key in log_format_types: if six.text_type(format_id) == log_format_types[key]: return key _LOG.warning('Unable to determine log format.') return None
Get the active log format for the SMTP virtual server. :param str server: The SMTP server name. :return: A string of the log format name. :rtype: str CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_log_format
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_smtp_server.py#L292-L316
[ "def _get_wmi_setting(wmi_class_name, setting, server):\n '''\n Get the value of the setting for the provided class.\n '''\n with salt.utils.winapi.Com():\n try:\n connection = wmi.WMI(namespace=_WMI_NAMESPACE)\n wmi_class = getattr(connection, wmi_class_name)\n\n objs = wmi_class([setting], Name=server)[0]\n ret = getattr(objs, setting)\n except wmi.x_wmi as error:\n _LOG.error('Encountered WMI error: %s', error.com_error)\n except (AttributeError, IndexError) as error:\n _LOG.error('Error getting %s: %s', wmi_class_name, error)\n return ret\n", "def get_log_format_types():\n '''\n Get all available log format names and ids.\n\n :return: A dictionary of the log format names and ids.\n :rtype: dict\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' win_smtp_server.get_log_format_types\n '''\n ret = dict()\n prefix = 'logging/'\n\n with salt.utils.winapi.Com():\n try:\n connection = wmi.WMI(namespace=_WMI_NAMESPACE)\n objs = connection.IISLogModuleSetting()\n\n # Remove the prefix from the name.\n for obj in objs:\n name = six.text_type(obj.Name).replace(prefix, '', 1)\n ret[name] = six.text_type(obj.LogModuleId)\n except wmi.x_wmi as error:\n _LOG.error('Encountered WMI error: %s', error.com_error)\n except (AttributeError, IndexError) as error:\n _LOG.error('Error getting IISLogModuleSetting: %s', error)\n\n if not ret:\n _LOG.error('Unable to get log format types.')\n return ret\n" ]
# -*- coding: utf-8 -*- ''' Module for managing IIS SMTP server configuration on Windows servers. The Windows features 'SMTP-Server' and 'Web-WMI' must be installed. :depends: wmi ''' # IIS metabase configuration settings: # https://goo.gl/XCt1uO # IIS logging options: # https://goo.gl/RL8ki9 # https://goo.gl/iwnDow # MicrosoftIISv2 namespace in Windows 2008r2 and later: # http://goo.gl/O4m48T # Connection and relay IPs in PowerShell: # https://goo.gl/aBMZ9K # http://goo.gl/MrybFq # Import python libs from __future__ import absolute_import, unicode_literals, print_function import logging import re # Import Salt libs from salt.exceptions import SaltInvocationError import salt.utils.args import salt.utils.platform # Import 3rd-party libs from salt.ext import six try: import wmi import salt.utils.winapi _HAS_MODULE_DEPENDENCIES = True except ImportError: _HAS_MODULE_DEPENDENCIES = False _DEFAULT_SERVER = 'SmtpSvc/1' _WMI_NAMESPACE = 'MicrosoftIISv2' _LOG = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'win_smtp_server' def __virtual__(): ''' Only works on Windows systems. ''' if salt.utils.platform.is_windows() and _HAS_MODULE_DEPENDENCIES: return __virtualname__ return False def _get_wmi_setting(wmi_class_name, setting, server): ''' Get the value of the setting for the provided class. ''' with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) wmi_class = getattr(connection, wmi_class_name) objs = wmi_class([setting], Name=server)[0] ret = getattr(objs, setting) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting %s: %s', wmi_class_name, error) return ret def _set_wmi_setting(wmi_class_name, setting, value, server): ''' Set the value of the setting for the provided class. 
''' with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) wmi_class = getattr(connection, wmi_class_name) objs = wmi_class(Name=server)[0] except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting %s: %s', wmi_class_name, error) try: setattr(objs, setting, value) return True except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except AttributeError as error: _LOG.error('Error setting %s: %s', setting, error) return False def _normalize_server_settings(**settings): ''' Convert setting values that had been improperly converted to a dict back to a string. ''' ret = dict() settings = salt.utils.args.clean_kwargs(**settings) for setting in settings: if isinstance(settings[setting], dict): _LOG.debug('Fixing value: %s', settings[setting]) value_from_key = next(six.iterkeys(settings[setting])) ret[setting] = "{{{0}}}".format(value_from_key) else: ret[setting] = settings[setting] return ret def get_log_format_types(): ''' Get all available log format names and ids. :return: A dictionary of the log format names and ids. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_log_format_types ''' ret = dict() prefix = 'logging/' with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) objs = connection.IISLogModuleSetting() # Remove the prefix from the name. for obj in objs: name = six.text_type(obj.Name).replace(prefix, '', 1) ret[name] = six.text_type(obj.LogModuleId) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting IISLogModuleSetting: %s', error) if not ret: _LOG.error('Unable to get log format types.') return ret def get_servers(): ''' Get the SMTP virtual server names. :return: A list of the SMTP virtual servers. :rtype: list CLI Example: .. 
code-block:: bash salt '*' win_smtp_server.get_servers ''' ret = list() with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) objs = connection.IIsSmtpServerSetting() for obj in objs: ret.append(six.text_type(obj.Name)) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting IIsSmtpServerSetting: %s', error) _LOG.debug('Found SMTP servers: %s', ret) return ret def get_server_setting(settings, server=_DEFAULT_SERVER): ''' Get the value of the setting for the SMTP virtual server. :param str settings: A list of the setting names. :param str server: The SMTP server name. :return: A dictionary of the provided settings and their values. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_server_setting settings="['MaxRecipients']" ''' ret = dict() if not settings: _LOG.warning('No settings provided.') return ret with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) objs = connection.IIsSmtpServerSetting(settings, Name=server)[0] for setting in settings: ret[setting] = six.text_type(getattr(objs, setting)) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting IIsSmtpServerSetting: %s', error) return ret def set_server_setting(settings, server=_DEFAULT_SERVER): ''' Set the value of the setting for the SMTP virtual server. .. note:: The setting names are case-sensitive. :param str settings: A dictionary of the setting names and their values. :param str server: The SMTP server name. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_smtp_server.set_server_setting settings="{'MaxRecipients': '500'}" ''' if not settings: _LOG.warning('No settings provided') return False # Some fields are formatted like '{data}'. 
Salt tries to convert these to dicts # automatically on input, so convert them back to the proper format. settings = _normalize_server_settings(**settings) current_settings = get_server_setting(settings=settings.keys(), server=server) if settings == current_settings: _LOG.debug('Settings already contain the provided values.') return True # Note that we must fetch all properties of IIsSmtpServerSetting below, since # filtering for specific properties and then attempting to set them will cause # an error like: wmi.x_wmi Unexpected COM Error -2147352567 with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) objs = connection.IIsSmtpServerSetting(Name=server)[0] except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting IIsSmtpServerSetting: %s', error) for setting in settings: if six.text_type(settings[setting]) != six.text_type(current_settings[setting]): try: setattr(objs, setting, settings[setting]) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except AttributeError as error: _LOG.error('Error setting %s: %s', setting, error) # Get the settings post-change so that we can verify tht all properties # were modified successfully. Track the ones that weren't. new_settings = get_server_setting(settings=settings.keys(), server=server) failed_settings = dict() for setting in settings: if six.text_type(settings[setting]) != six.text_type(new_settings[setting]): failed_settings[setting] = settings[setting] if failed_settings: _LOG.error('Failed to change settings: %s', failed_settings) return False _LOG.debug('Settings configured successfully: %s', settings.keys()) return True def set_log_format(log_format, server=_DEFAULT_SERVER): ''' Set the active log format for the SMTP virtual server. :param str log_format: The log format name. :param str server: The SMTP server name. 
:return: A boolean representing whether the change succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_smtp_server.set_log_format 'Microsoft IIS Log File Format' ''' setting = 'LogPluginClsid' log_format_types = get_log_format_types() format_id = log_format_types.get(log_format, None) if not format_id: message = ("Invalid log format '{0}' specified. Valid formats:" ' {1}').format(log_format, log_format_types.keys()) raise SaltInvocationError(message) _LOG.debug("Id for '%s' found: %s", log_format, format_id) current_log_format = get_log_format(server) if log_format == current_log_format: _LOG.debug('%s already contains the provided format.', setting) return True _set_wmi_setting('IIsSmtpServerSetting', setting, format_id, server) new_log_format = get_log_format(server) ret = log_format == new_log_format if ret: _LOG.debug("Setting %s configured successfully: %s", setting, log_format) else: _LOG.error("Unable to configure %s with value: %s", setting, log_format) return ret def get_connection_ip_list(as_wmi_format=False, server=_DEFAULT_SERVER): ''' Get the IPGrant list for the SMTP virtual server. :param bool as_wmi_format: Returns the connection IPs as a list in the format WMI expects. :param str server: The SMTP server name. :return: A dictionary of the IP and subnet pairs. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_connection_ip_list ''' ret = dict() setting = 'IPGrant' reg_separator = r',\s*' if as_wmi_format: ret = list() addresses = _get_wmi_setting('IIsIPSecuritySetting', setting, server) # WMI returns the addresses as a tuple of unicode strings, each representing # an address/subnet pair. Remove extra spaces that may be present. 
for unnormalized_address in addresses: ip_address, subnet = re.split(reg_separator, unnormalized_address) if as_wmi_format: ret.append('{0}, {1}'.format(ip_address, subnet)) else: ret[ip_address] = subnet if not ret: _LOG.debug('%s is empty.', setting) return ret def set_connection_ip_list(addresses=None, grant_by_default=False, server=_DEFAULT_SERVER): ''' Set the IPGrant list for the SMTP virtual server. :param str addresses: A dictionary of IP + subnet pairs. :param bool grant_by_default: Whether the addresses should be a blacklist or whitelist. :param str server: The SMTP server name. :return: A boolean representing whether the change succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_smtp_server.set_connection_ip_list addresses="{'127.0.0.1': '255.255.255.255'}" ''' setting = 'IPGrant' formatted_addresses = list() # It's okay to accept an empty list for set_connection_ip_list, # since an empty list may be desirable. if not addresses: addresses = dict() _LOG.debug('Empty %s specified.', setting) # Convert addresses to the 'ip_address, subnet' format used by # IIsIPSecuritySetting. for address in addresses: formatted_addresses.append('{0}, {1}'.format(address.strip(), addresses[address].strip())) current_addresses = get_connection_ip_list(as_wmi_format=True, server=server) # Order is not important, so compare to the current addresses as unordered sets. if set(formatted_addresses) == set(current_addresses): _LOG.debug('%s already contains the provided addresses.', setting) return True # First we should check GrantByDefault, and change it if necessary. 
current_grant_by_default = _get_wmi_setting('IIsIPSecuritySetting', 'GrantByDefault', server) if grant_by_default != current_grant_by_default: _LOG.debug('Setting GrantByDefault to: %s', grant_by_default) _set_wmi_setting('IIsIPSecuritySetting', 'GrantByDefault', grant_by_default, server) _set_wmi_setting('IIsIPSecuritySetting', setting, formatted_addresses, server) new_addresses = get_connection_ip_list(as_wmi_format=True, server=server) ret = set(formatted_addresses) == set(new_addresses) if ret: _LOG.debug('%s configured successfully: %s', setting, formatted_addresses) return ret _LOG.error('Unable to configure %s with value: %s', setting, formatted_addresses) return ret def get_relay_ip_list(server=_DEFAULT_SERVER): ''' Get the RelayIpList list for the SMTP virtual server. :param str server: The SMTP server name. :return: A list of the relay IPs. :rtype: list .. note:: A return value of None corresponds to the restrictive 'Only the list below' GUI parameter with an empty access list, and setting an empty list/tuple corresponds to the more permissive 'All except the list below' GUI parameter. CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_relay_ip_list ''' ret = list() setting = 'RelayIpList' lines = _get_wmi_setting('IIsSmtpServerSetting', setting, server) if not lines: _LOG.debug('%s is empty: %s', setting, lines) if lines is None: lines = [None] return list(lines) # WMI returns the addresses as a tuple of individual octets, so we # need to group them and reassemble them into IP addresses. i = 0 while i < len(lines): octets = [six.text_type(x) for x in lines[i: i + 4]] address = '.'.join(octets) ret.append(address) i += 4 return ret def set_relay_ip_list(addresses=None, server=_DEFAULT_SERVER): ''' Set the RelayIpList list for the SMTP virtual server. Due to the unusual way that Windows stores the relay IPs, it is advisable to retrieve the existing list you wish to set from a pre-configured server. 
For example, setting '127.0.0.1' as an allowed relay IP through the GUI would generate an actual relay IP list similar to the following: .. code-block:: cfg ['24.0.0.128', '32.0.0.128', '60.0.0.128', '68.0.0.128', '1.0.0.0', '76.0.0.0', '0.0.0.0', '0.0.0.0', '1.0.0.0', '1.0.0.0', '2.0.0.0', '2.0.0.0', '4.0.0.0', '0.0.0.0', '76.0.0.128', '0.0.0.0', '0.0.0.0', '0.0.0.0', '0.0.0.0', '255.255.255.255', '127.0.0.1'] .. note:: Setting the list to None corresponds to the restrictive 'Only the list below' GUI parameter with an empty access list configured, and setting an empty list/tuple corresponds to the more permissive 'All except the list below' GUI parameter. :param str addresses: A list of the relay IPs. The order of the list is important. :param str server: The SMTP server name. :return: A boolean representing whether the change succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_smtp_server.set_relay_ip_list addresses="['192.168.1.1', '172.16.1.1']" ''' setting = 'RelayIpList' formatted_addresses = list() current_addresses = get_relay_ip_list(server) if list(addresses) == current_addresses: _LOG.debug('%s already contains the provided addresses.', setting) return True if addresses: # The WMI input data needs to be in the format used by RelayIpList. Order # is also important due to the way RelayIpList orders the address list. if addresses[0] is None: formatted_addresses = None else: for address in addresses: for octet in address.split('.'): formatted_addresses.append(octet) _LOG.debug('Formatted %s addresses: %s', setting, formatted_addresses) _set_wmi_setting('IIsSmtpServerSetting', setting, formatted_addresses, server) new_addresses = get_relay_ip_list(server) ret = list(addresses) == new_addresses if ret: _LOG.debug('%s configured successfully: %s', setting, addresses) return ret _LOG.error('Unable to configure %s with value: %s', setting, addresses) return ret
saltstack/salt
salt/modules/win_smtp_server.py
set_log_format
python
def set_log_format(log_format, server=_DEFAULT_SERVER): ''' Set the active log format for the SMTP virtual server. :param str log_format: The log format name. :param str server: The SMTP server name. :return: A boolean representing whether the change succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_smtp_server.set_log_format 'Microsoft IIS Log File Format' ''' setting = 'LogPluginClsid' log_format_types = get_log_format_types() format_id = log_format_types.get(log_format, None) if not format_id: message = ("Invalid log format '{0}' specified. Valid formats:" ' {1}').format(log_format, log_format_types.keys()) raise SaltInvocationError(message) _LOG.debug("Id for '%s' found: %s", log_format, format_id) current_log_format = get_log_format(server) if log_format == current_log_format: _LOG.debug('%s already contains the provided format.', setting) return True _set_wmi_setting('IIsSmtpServerSetting', setting, format_id, server) new_log_format = get_log_format(server) ret = log_format == new_log_format if ret: _LOG.debug("Setting %s configured successfully: %s", setting, log_format) else: _LOG.error("Unable to configure %s with value: %s", setting, log_format) return ret
Set the active log format for the SMTP virtual server. :param str log_format: The log format name. :param str server: The SMTP server name. :return: A boolean representing whether the change succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_smtp_server.set_log_format 'Microsoft IIS Log File Format'
train
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_smtp_server.py#L319-L361
[ "def _set_wmi_setting(wmi_class_name, setting, value, server):\n '''\n Set the value of the setting for the provided class.\n '''\n with salt.utils.winapi.Com():\n try:\n connection = wmi.WMI(namespace=_WMI_NAMESPACE)\n wmi_class = getattr(connection, wmi_class_name)\n\n objs = wmi_class(Name=server)[0]\n except wmi.x_wmi as error:\n _LOG.error('Encountered WMI error: %s', error.com_error)\n except (AttributeError, IndexError) as error:\n _LOG.error('Error getting %s: %s', wmi_class_name, error)\n\n try:\n setattr(objs, setting, value)\n return True\n except wmi.x_wmi as error:\n _LOG.error('Encountered WMI error: %s', error.com_error)\n except AttributeError as error:\n _LOG.error('Error setting %s: %s', setting, error)\n return False\n", "def get_log_format_types():\n '''\n Get all available log format names and ids.\n\n :return: A dictionary of the log format names and ids.\n :rtype: dict\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' win_smtp_server.get_log_format_types\n '''\n ret = dict()\n prefix = 'logging/'\n\n with salt.utils.winapi.Com():\n try:\n connection = wmi.WMI(namespace=_WMI_NAMESPACE)\n objs = connection.IISLogModuleSetting()\n\n # Remove the prefix from the name.\n for obj in objs:\n name = six.text_type(obj.Name).replace(prefix, '', 1)\n ret[name] = six.text_type(obj.LogModuleId)\n except wmi.x_wmi as error:\n _LOG.error('Encountered WMI error: %s', error.com_error)\n except (AttributeError, IndexError) as error:\n _LOG.error('Error getting IISLogModuleSetting: %s', error)\n\n if not ret:\n _LOG.error('Unable to get log format types.')\n return ret\n", "def get_log_format(server=_DEFAULT_SERVER):\n '''\n Get the active log format for the SMTP virtual server.\n\n :param str server: The SMTP server name.\n\n :return: A string of the log format name.\n :rtype: str\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' win_smtp_server.get_log_format\n '''\n log_format_types = get_log_format_types()\n format_id = _get_wmi_setting('IIsSmtpServerSetting', 'LogPluginClsid', server)\n\n # Since IIsSmtpServerSetting stores the log type as an id, we need\n # to get the mapping from IISLogModuleSetting and extract the name.\n for key in log_format_types:\n if six.text_type(format_id) == log_format_types[key]:\n return key\n _LOG.warning('Unable to determine log format.')\n return None\n" ]
# -*- coding: utf-8 -*- ''' Module for managing IIS SMTP server configuration on Windows servers. The Windows features 'SMTP-Server' and 'Web-WMI' must be installed. :depends: wmi ''' # IIS metabase configuration settings: # https://goo.gl/XCt1uO # IIS logging options: # https://goo.gl/RL8ki9 # https://goo.gl/iwnDow # MicrosoftIISv2 namespace in Windows 2008r2 and later: # http://goo.gl/O4m48T # Connection and relay IPs in PowerShell: # https://goo.gl/aBMZ9K # http://goo.gl/MrybFq # Import python libs from __future__ import absolute_import, unicode_literals, print_function import logging import re # Import Salt libs from salt.exceptions import SaltInvocationError import salt.utils.args import salt.utils.platform # Import 3rd-party libs from salt.ext import six try: import wmi import salt.utils.winapi _HAS_MODULE_DEPENDENCIES = True except ImportError: _HAS_MODULE_DEPENDENCIES = False _DEFAULT_SERVER = 'SmtpSvc/1' _WMI_NAMESPACE = 'MicrosoftIISv2' _LOG = logging.getLogger(__name__) # Define the module's virtual name __virtualname__ = 'win_smtp_server' def __virtual__(): ''' Only works on Windows systems. ''' if salt.utils.platform.is_windows() and _HAS_MODULE_DEPENDENCIES: return __virtualname__ return False def _get_wmi_setting(wmi_class_name, setting, server): ''' Get the value of the setting for the provided class. ''' with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) wmi_class = getattr(connection, wmi_class_name) objs = wmi_class([setting], Name=server)[0] ret = getattr(objs, setting) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting %s: %s', wmi_class_name, error) return ret def _set_wmi_setting(wmi_class_name, setting, value, server): ''' Set the value of the setting for the provided class. 
''' with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) wmi_class = getattr(connection, wmi_class_name) objs = wmi_class(Name=server)[0] except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting %s: %s', wmi_class_name, error) try: setattr(objs, setting, value) return True except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except AttributeError as error: _LOG.error('Error setting %s: %s', setting, error) return False def _normalize_server_settings(**settings): ''' Convert setting values that had been improperly converted to a dict back to a string. ''' ret = dict() settings = salt.utils.args.clean_kwargs(**settings) for setting in settings: if isinstance(settings[setting], dict): _LOG.debug('Fixing value: %s', settings[setting]) value_from_key = next(six.iterkeys(settings[setting])) ret[setting] = "{{{0}}}".format(value_from_key) else: ret[setting] = settings[setting] return ret def get_log_format_types(): ''' Get all available log format names and ids. :return: A dictionary of the log format names and ids. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_log_format_types ''' ret = dict() prefix = 'logging/' with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) objs = connection.IISLogModuleSetting() # Remove the prefix from the name. for obj in objs: name = six.text_type(obj.Name).replace(prefix, '', 1) ret[name] = six.text_type(obj.LogModuleId) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting IISLogModuleSetting: %s', error) if not ret: _LOG.error('Unable to get log format types.') return ret def get_servers(): ''' Get the SMTP virtual server names. :return: A list of the SMTP virtual servers. :rtype: list CLI Example: .. 
code-block:: bash salt '*' win_smtp_server.get_servers ''' ret = list() with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) objs = connection.IIsSmtpServerSetting() for obj in objs: ret.append(six.text_type(obj.Name)) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting IIsSmtpServerSetting: %s', error) _LOG.debug('Found SMTP servers: %s', ret) return ret def get_server_setting(settings, server=_DEFAULT_SERVER): ''' Get the value of the setting for the SMTP virtual server. :param str settings: A list of the setting names. :param str server: The SMTP server name. :return: A dictionary of the provided settings and their values. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_server_setting settings="['MaxRecipients']" ''' ret = dict() if not settings: _LOG.warning('No settings provided.') return ret with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) objs = connection.IIsSmtpServerSetting(settings, Name=server)[0] for setting in settings: ret[setting] = six.text_type(getattr(objs, setting)) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting IIsSmtpServerSetting: %s', error) return ret def set_server_setting(settings, server=_DEFAULT_SERVER): ''' Set the value of the setting for the SMTP virtual server. .. note:: The setting names are case-sensitive. :param str settings: A dictionary of the setting names and their values. :param str server: The SMTP server name. :return: A boolean representing whether all changes succeeded. :rtype: bool CLI Example: .. code-block:: bash salt '*' win_smtp_server.set_server_setting settings="{'MaxRecipients': '500'}" ''' if not settings: _LOG.warning('No settings provided') return False # Some fields are formatted like '{data}'. 
Salt tries to convert these to dicts # automatically on input, so convert them back to the proper format. settings = _normalize_server_settings(**settings) current_settings = get_server_setting(settings=settings.keys(), server=server) if settings == current_settings: _LOG.debug('Settings already contain the provided values.') return True # Note that we must fetch all properties of IIsSmtpServerSetting below, since # filtering for specific properties and then attempting to set them will cause # an error like: wmi.x_wmi Unexpected COM Error -2147352567 with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) objs = connection.IIsSmtpServerSetting(Name=server)[0] except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except (AttributeError, IndexError) as error: _LOG.error('Error getting IIsSmtpServerSetting: %s', error) for setting in settings: if six.text_type(settings[setting]) != six.text_type(current_settings[setting]): try: setattr(objs, setting, settings[setting]) except wmi.x_wmi as error: _LOG.error('Encountered WMI error: %s', error.com_error) except AttributeError as error: _LOG.error('Error setting %s: %s', setting, error) # Get the settings post-change so that we can verify tht all properties # were modified successfully. Track the ones that weren't. new_settings = get_server_setting(settings=settings.keys(), server=server) failed_settings = dict() for setting in settings: if six.text_type(settings[setting]) != six.text_type(new_settings[setting]): failed_settings[setting] = settings[setting] if failed_settings: _LOG.error('Failed to change settings: %s', failed_settings) return False _LOG.debug('Settings configured successfully: %s', settings.keys()) return True def get_log_format(server=_DEFAULT_SERVER): ''' Get the active log format for the SMTP virtual server. :param str server: The SMTP server name. :return: A string of the log format name. :rtype: str CLI Example: .. 
code-block:: bash salt '*' win_smtp_server.get_log_format ''' log_format_types = get_log_format_types() format_id = _get_wmi_setting('IIsSmtpServerSetting', 'LogPluginClsid', server) # Since IIsSmtpServerSetting stores the log type as an id, we need # to get the mapping from IISLogModuleSetting and extract the name. for key in log_format_types: if six.text_type(format_id) == log_format_types[key]: return key _LOG.warning('Unable to determine log format.') return None def get_connection_ip_list(as_wmi_format=False, server=_DEFAULT_SERVER): ''' Get the IPGrant list for the SMTP virtual server. :param bool as_wmi_format: Returns the connection IPs as a list in the format WMI expects. :param str server: The SMTP server name. :return: A dictionary of the IP and subnet pairs. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_connection_ip_list ''' ret = dict() setting = 'IPGrant' reg_separator = r',\s*' if as_wmi_format: ret = list() addresses = _get_wmi_setting('IIsIPSecuritySetting', setting, server) # WMI returns the addresses as a tuple of unicode strings, each representing # an address/subnet pair. Remove extra spaces that may be present. for unnormalized_address in addresses: ip_address, subnet = re.split(reg_separator, unnormalized_address) if as_wmi_format: ret.append('{0}, {1}'.format(ip_address, subnet)) else: ret[ip_address] = subnet if not ret: _LOG.debug('%s is empty.', setting) return ret def set_connection_ip_list(addresses=None, grant_by_default=False, server=_DEFAULT_SERVER): ''' Set the IPGrant list for the SMTP virtual server. :param str addresses: A dictionary of IP + subnet pairs. :param bool grant_by_default: Whether the addresses should be a blacklist or whitelist. :param str server: The SMTP server name. :return: A boolean representing whether the change succeeded. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' win_smtp_server.set_connection_ip_list addresses="{'127.0.0.1': '255.255.255.255'}" ''' setting = 'IPGrant' formatted_addresses = list() # It's okay to accept an empty list for set_connection_ip_list, # since an empty list may be desirable. if not addresses: addresses = dict() _LOG.debug('Empty %s specified.', setting) # Convert addresses to the 'ip_address, subnet' format used by # IIsIPSecuritySetting. for address in addresses: formatted_addresses.append('{0}, {1}'.format(address.strip(), addresses[address].strip())) current_addresses = get_connection_ip_list(as_wmi_format=True, server=server) # Order is not important, so compare to the current addresses as unordered sets. if set(formatted_addresses) == set(current_addresses): _LOG.debug('%s already contains the provided addresses.', setting) return True # First we should check GrantByDefault, and change it if necessary. current_grant_by_default = _get_wmi_setting('IIsIPSecuritySetting', 'GrantByDefault', server) if grant_by_default != current_grant_by_default: _LOG.debug('Setting GrantByDefault to: %s', grant_by_default) _set_wmi_setting('IIsIPSecuritySetting', 'GrantByDefault', grant_by_default, server) _set_wmi_setting('IIsIPSecuritySetting', setting, formatted_addresses, server) new_addresses = get_connection_ip_list(as_wmi_format=True, server=server) ret = set(formatted_addresses) == set(new_addresses) if ret: _LOG.debug('%s configured successfully: %s', setting, formatted_addresses) return ret _LOG.error('Unable to configure %s with value: %s', setting, formatted_addresses) return ret def get_relay_ip_list(server=_DEFAULT_SERVER): ''' Get the RelayIpList list for the SMTP virtual server. :param str server: The SMTP server name. :return: A list of the relay IPs. :rtype: list .. 
note:: A return value of None corresponds to the restrictive 'Only the list below' GUI parameter with an empty access list, and setting an empty list/tuple corresponds to the more permissive 'All except the list below' GUI parameter. CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_relay_ip_list ''' ret = list() setting = 'RelayIpList' lines = _get_wmi_setting('IIsSmtpServerSetting', setting, server) if not lines: _LOG.debug('%s is empty: %s', setting, lines) if lines is None: lines = [None] return list(lines) # WMI returns the addresses as a tuple of individual octets, so we # need to group them and reassemble them into IP addresses. i = 0 while i < len(lines): octets = [six.text_type(x) for x in lines[i: i + 4]] address = '.'.join(octets) ret.append(address) i += 4 return ret def set_relay_ip_list(addresses=None, server=_DEFAULT_SERVER): ''' Set the RelayIpList list for the SMTP virtual server. Due to the unusual way that Windows stores the relay IPs, it is advisable to retrieve the existing list you wish to set from a pre-configured server. For example, setting '127.0.0.1' as an allowed relay IP through the GUI would generate an actual relay IP list similar to the following: .. code-block:: cfg ['24.0.0.128', '32.0.0.128', '60.0.0.128', '68.0.0.128', '1.0.0.0', '76.0.0.0', '0.0.0.0', '0.0.0.0', '1.0.0.0', '1.0.0.0', '2.0.0.0', '2.0.0.0', '4.0.0.0', '0.0.0.0', '76.0.0.128', '0.0.0.0', '0.0.0.0', '0.0.0.0', '0.0.0.0', '255.255.255.255', '127.0.0.1'] .. note:: Setting the list to None corresponds to the restrictive 'Only the list below' GUI parameter with an empty access list configured, and setting an empty list/tuple corresponds to the more permissive 'All except the list below' GUI parameter. :param str addresses: A list of the relay IPs. The order of the list is important. :param str server: The SMTP server name. :return: A boolean representing whether the change succeeded. :rtype: bool CLI Example: .. 
code-block:: bash salt '*' win_smtp_server.set_relay_ip_list addresses="['192.168.1.1', '172.16.1.1']" ''' setting = 'RelayIpList' formatted_addresses = list() current_addresses = get_relay_ip_list(server) if list(addresses) == current_addresses: _LOG.debug('%s already contains the provided addresses.', setting) return True if addresses: # The WMI input data needs to be in the format used by RelayIpList. Order # is also important due to the way RelayIpList orders the address list. if addresses[0] is None: formatted_addresses = None else: for address in addresses: for octet in address.split('.'): formatted_addresses.append(octet) _LOG.debug('Formatted %s addresses: %s', setting, formatted_addresses) _set_wmi_setting('IIsSmtpServerSetting', setting, formatted_addresses, server) new_addresses = get_relay_ip_list(server) ret = list(addresses) == new_addresses if ret: _LOG.debug('%s configured successfully: %s', setting, addresses) return ret _LOG.error('Unable to configure %s with value: %s', setting, addresses) return ret